Compare commits
89 commits
main
...
fix_starte
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
025cfafe4e |
||
|
1fba34c4e0 |
|||
|
|
779a37d5a9 |
||
|
|
2d5c559261 |
||
|
|
836faa73d6 |
||
|
|
ab017bafcd |
||
|
|
1716976994 |
||
|
|
59d64f0cf5 |
||
|
|
569c39cb97 |
||
|
|
e441dd80ed |
||
|
|
efb554ad2a |
||
|
|
fafdc36d38 |
||
|
|
61a323d97d |
||
|
|
0191d1a584 |
||
|
|
0118012a43 |
||
|
|
234ff867b9 |
||
|
|
4e92f75939 |
||
|
|
3e6474b348 |
||
|
|
d83531392b |
||
|
|
b6de042488 |
||
|
|
9c1a6de88e |
||
|
|
d366ce33fa |
||
|
|
1eae241b25 |
||
|
|
a483d55b8c |
||
|
|
933198d8be |
||
|
|
15edb5ad14 |
||
|
|
bd1bec6224 |
||
|
|
a642691fb1 |
||
|
|
9ae41d6251 |
||
|
|
24ec2bd299 |
||
|
|
749768fdb7 |
||
|
|
ae274a3e6d |
||
|
|
4939801b91 |
||
|
|
8caa5d9225 |
||
|
|
60229628a7 |
||
|
|
795c62ae22 |
||
|
|
bc82d99907 |
||
|
|
43812ceda6 |
||
|
|
bbb0838dda |
||
|
|
d3abfb5b39 |
||
|
|
2f9f80f7b6 |
||
|
|
877638bbda |
||
|
|
a1629a7553 |
||
|
|
3ecf9640b7 |
||
|
|
2f06fa9d4d |
||
|
|
72ad30ede3 |
||
|
|
8af190902c |
||
|
|
462b630de4 |
||
|
|
fd9f8c5711 |
||
|
|
59937ee9e7 |
||
|
|
2475e5a254 |
||
|
|
a44130d3c3 |
||
|
|
3cd9ba688e |
||
|
|
34d411c1bb |
||
|
|
74080df2af |
||
|
|
8c74c98b8d |
||
|
|
5099ffcab8 |
||
|
|
43119d0d18 |
||
|
|
ea918df11d |
||
|
|
e63e879af6 |
||
|
|
0b78ccd4de |
||
|
|
fbb0012045 |
||
|
|
bb01fd00fc |
||
|
|
a973e747cb |
||
|
|
ec7579d578 |
||
|
|
f6e6edbc37 |
||
|
|
590682d748 |
||
|
|
2c405f77e7 |
||
|
|
cfb29134bb |
||
|
|
e0816e58a2 |
||
|
|
93517fa3b6 |
||
|
|
c8ac453601 |
||
|
|
18e2ff2d2d |
||
|
|
226c71bb7c |
||
|
|
3d601ffa79 |
||
|
|
c188ec113c |
||
|
|
ede849aaf7 |
||
|
|
8fff7cc4a4 |
||
|
|
c205e9914a |
||
|
|
75970e5ff4 |
||
|
|
e68f6a405a |
||
|
|
faff2015c4 |
||
|
577bece3a0 |
|||
|
|
9141005e6b |
||
|
|
181606fd80 |
||
|
|
4ab322290b |
||
|
ca0bee8427 |
|||
|
|
0628398fa9 |
||
|
|
76a8584c54 |
641 changed files with 40362 additions and 14348 deletions
|
|
@ -1,3 +1,3 @@
|
|||
{
|
||||
"biome.configurationPath": "src/frontend/biome.json"
|
||||
"biome.configurationPath": "src/frontend/biome.json"
|
||||
}
|
||||
12
.dockerignore
Normal file
12
.dockerignore
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
src/frontend/node_modules
|
||||
src/frontend/build
|
||||
src/frontend/coverage
|
||||
src/frontend/test-results
|
||||
src/frontend/playwright-report
|
||||
src/frontend/.dspy_cache
|
||||
**/.DS_Store
|
||||
**/__pycache__
|
||||
**/*.pyc
|
||||
**/.pytest_cache
|
||||
**/.venv
|
||||
**/.env
|
||||
13
.env.example
13
.env.example
|
|
@ -79,12 +79,16 @@ LANGFLOW_REMOVE_API_KEYS=
|
|||
# LANGFLOW_REDIS_CACHE_EXPIRE (default: 3600)
|
||||
LANGFLOW_CACHE_TYPE=
|
||||
|
||||
# Set AUTO_LOGIN to false if you want to disable auto login
|
||||
# Set LANGFLOW_AUTO_LOGIN to false if you want to disable auto login
|
||||
# and use the login form to login. LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD
|
||||
# must be set if AUTO_LOGIN is set to false
|
||||
# Values: true, false
|
||||
LANGFLOW_AUTO_LOGIN=
|
||||
|
||||
# SET LANGFLOW_ENABLE_SUPERUSER_CLI to false to disable
|
||||
# superuser creation via the CLI
|
||||
LANGFLOW_ENABLE_SUPERUSER_CLI=
|
||||
|
||||
# Superuser username
|
||||
# Example: LANGFLOW_SUPERUSER=admin
|
||||
LANGFLOW_SUPERUSER=
|
||||
|
|
@ -97,6 +101,11 @@ LANGFLOW_SUPERUSER_PASSWORD=
|
|||
# Values: true, false
|
||||
LANGFLOW_STORE_ENVIRONMENT_VARIABLES=
|
||||
|
||||
# Should enable the MCP composer feature in MCP projects
|
||||
# Values: true, false
|
||||
# Default: true
|
||||
LANGFLOW_FEATURE_MCP_COMPOSER=
|
||||
|
||||
# STORE_URL
|
||||
# Example: LANGFLOW_STORE_URL=https://api.langflow.store
|
||||
# LANGFLOW_STORE_URL=
|
||||
|
|
@ -111,4 +120,4 @@ LANGFLOW_STORE_ENVIRONMENT_VARIABLES=
|
|||
|
||||
# Value must finish with slash /
|
||||
#BACKEND_URL=http://localhost:7860/
|
||||
BACKEND_URL=
|
||||
BACKEND_URL=
|
||||
|
|
|
|||
2
.github/changes-filter.yaml
vendored
2
.github/changes-filter.yaml
vendored
|
|
@ -7,6 +7,7 @@ python:
|
|||
- "src/backend/base/pyproject.toml"
|
||||
- "src/backend/base/uv.lock"
|
||||
- "**/python_test.yml"
|
||||
- ".github/workflows/ci.yml"
|
||||
components-changes:
|
||||
- "src/backend/base/langflow/components/**"
|
||||
starter-projects-changes:
|
||||
|
|
@ -17,6 +18,7 @@ frontend:
|
|||
- "src/frontend/**"
|
||||
- "**/typescript_test.yml"
|
||||
- "**/jest_test.yml"
|
||||
- ".github/workflows/ci.yml"
|
||||
docs:
|
||||
- "docs/**"
|
||||
|
||||
|
|
|
|||
6
.github/workflows/ci.yml
vendored
6
.github/workflows/ci.yml
vendored
|
|
@ -38,7 +38,7 @@ on:
|
|||
type: string
|
||||
default: "['3.10']"
|
||||
pull_request:
|
||||
types: [synchronize, labeled]
|
||||
types: [opened, synchronize, labeled]
|
||||
merge_group:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
|
|
@ -120,7 +120,7 @@ jobs:
|
|||
name: Should Run CI
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should-run-ci: ${{ (contains( github.event.pull_request.labels.*.name, 'lgtm') && github.event.pull_request.draft == false) || (github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' || github.event_name == 'merge_group') }}
|
||||
should-run-ci: ${{ (github.event.pull_request.draft == false) || (github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' || github.event_name == 'merge_group') }}
|
||||
should-run-tests: ${{ !contains(github.event.pull_request.labels.*.name, 'fast-track') || github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || github.event_name == 'merge_group' }}
|
||||
steps:
|
||||
# Do anything just to make the job run
|
||||
|
|
@ -208,7 +208,7 @@ jobs:
|
|||
test-templates:
|
||||
needs: [path-filter, set-ci-condition]
|
||||
name: Test Starter Templates
|
||||
if: ${{ needs.path-filter.outputs.starter-projects == 'true' && needs.set-ci-condition.outputs.should-run-tests == 'true' }}
|
||||
if: ${{ (needs.path-filter.outputs.python == 'true' || needs.path-filter.outputs.frontend == 'true') && needs.set-ci-condition.outputs.should-run-tests == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
|
|
|
|||
6
.github/workflows/cross-platform-test.yml
vendored
6
.github/workflows/cross-platform-test.yml
vendored
|
|
@ -256,7 +256,8 @@ jobs:
|
|||
- name: Test CLI help command (Windows)
|
||||
if: matrix.os == 'windows'
|
||||
run: |
|
||||
test-env\Scripts\python.exe -m langflow --help
|
||||
call test-env\Scripts\activate.bat
|
||||
python -m langflow --help
|
||||
shell: cmd
|
||||
|
||||
- name: Test CLI help command (Unix)
|
||||
|
|
@ -508,7 +509,8 @@ jobs:
|
|||
- name: Test CLI help command (Windows)
|
||||
if: matrix.os == 'windows'
|
||||
run: |
|
||||
test-env\Scripts\python.exe -m langflow --help
|
||||
call test-env\Scripts\activate.bat
|
||||
python -m langflow --help
|
||||
shell: cmd
|
||||
|
||||
- name: Test CLI help command (Unix)
|
||||
|
|
|
|||
|
|
@ -14,7 +14,10 @@
|
|||
[](https://deepwiki.com/langflow-ai/langflow)
|
||||
|
||||
> [!CAUTION]
|
||||
> Users must update to Langflow >= 1.3 to protect against [CVE-2025-3248](https://nvd.nist.gov/vuln/detail/CVE-2025-3248).
|
||||
> - Users must update to Langflow >= 1.3 to protect against [CVE-2025-3248](https://nvd.nist.gov/vuln/detail/CVE-2025-3248)
|
||||
> - Users must update to Langflow >= 1.5.1 to protect against [CVE-2025-57760](https://github.com/langflow-ai/langflow/security/advisories/GHSA-4gv9-mp8m-592r)
|
||||
>
|
||||
> For security information, see our [Security Policy](./SECURITY.md) and [Security Advisories](https://github.com/langflow-ai/langflow/security/advisories).
|
||||
|
||||
[Langflow](https://langflow.org) is a powerful tool for building and deploying AI-powered agents and workflows. It provides developers with both a visual authoring experience and built-in API and MCP servers that turn every workflow into a tool that can be integrated into applications built on any framework or stack. Langflow comes with batteries included and supports all major LLMs, vector databases and a growing library of AI tools.
|
||||
|
||||
|
|
|
|||
125
RELEASE.md
Normal file
125
RELEASE.md
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
# Releasing Langflow
|
||||
|
||||
Langflow follows a **release-when-ready** cadence, with each cycle typically lasting 4–6 weeks depending on QA and stabilization needs.
|
||||
|
||||
## Goals
|
||||
|
||||
* Keep `main` fast-moving for everyday work while ensuring stable release builds when features mature.
|
||||
* Provide an isolated branch for QA and last-minute fixes (the release candidate, RC).
|
||||
* Preserve a linear, readable history wherever possible.
|
||||
* Ensure released code is extensively tested before publication.
|
||||
* Minimize time to resolution of critical bugs.
|
||||
|
||||
## Process Overview
|
||||
|
||||
### 1. OSS QA
|
||||
|
||||
Create an OSS release candidate (RC) branch containing `langflow` and any associated PyPI packages (e.g. `lfx`).
|
||||
During this period:
|
||||
|
||||
* QA is performed manually.
|
||||
* Bug fixes are merged into the RC branch.
|
||||
* New features continue development on `main`.
|
||||
|
||||
This step usually lasts about a week.
|
||||
|
||||
### 2. Desktop QA
|
||||
|
||||
Once OSS QA and bugfixing are complete, create a Desktop release candidate.
|
||||
|
||||
* The Desktop RC is based on the final OSS RC.
|
||||
* Manual QA is performed.
|
||||
* Bug fixes are merged into the Desktop RC.
|
||||
* New features continue on `main`.
|
||||
|
||||
This step also usually lasts about a week.
|
||||
|
||||
### 3. Release
|
||||
|
||||
After QA and bugfixing are complete for both OSS and Desktop:
|
||||
|
||||
* Final releases are cut from their respective RC branches.
|
||||
* Release timing is coordinated with Langflow’s DevRel team.
|
||||
* For at least 24 hours after release, Discord, GitHub, and other support channels should be monitored for critical bug reports.
|
||||
|
||||
## Branch Model
|
||||
|
||||
| Branch | Purpose | Merge Policy |
|
||||
| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------ |
|
||||
| **`main`** | Integration branch. All feature PRs target this by default. | **Squash & Merge** (linear history) |
|
||||
| **`release-X.Y.Z`**<br>(e.g. `release-1.4.3`) | Temporary RC branch. Active only for the release cycle. Accepts QA and blocking-bug PRs labeled `type:release`. | **Squash & Merge** within the branch.<br>Rebased onto **`main`** before final merge. |
|
||||
|
||||
## Release Steps
|
||||
|
||||
### 1. Cut Release Candidate
|
||||
|
||||
```sh
|
||||
git checkout main && git pull # Ensure local main is up to date
|
||||
git checkout -b release-X.Y.Z # Create new release candidate branch
|
||||
git push -u origin release-X.Y.Z # Push RC branch to remote
|
||||
```
|
||||
|
||||
### 2. Apply a Bugfix to RC
|
||||
|
||||
1. Create a feature branch as usual.
|
||||
2. Open a GitHub PR targeting `release-X.Y.Z`.
|
||||
3. Review and approve as normal.
|
||||
4. Merge into the RC branch after review.
|
||||
|
||||
### 3. Final Release
|
||||
|
||||
```sh
|
||||
git checkout release-X.Y.Z && git pull # Ensure RC branch is up to date
|
||||
git tag vX.Y.Z # Create final release tag
|
||||
git push origin vX.Y.Z # Push tag to remote
|
||||
```
|
||||
|
||||
### 4. Merge RC Back into Main
|
||||
|
||||
```sh
|
||||
git checkout main
|
||||
git merge --ff-only release-X.Y.Z # Fast-forward main to include RC changes
|
||||
```
|
||||
|
||||
## Merge Strategy
|
||||
|
||||
1. **Squash & Merge** everywhere for atomic commits and clean history.
|
||||
|
||||
2. While RC is open, periodically re-sync with main:
|
||||
|
||||
```sh
|
||||
git checkout release-X.Y.Z
|
||||
git fetch origin
|
||||
git rebase origin/main
|
||||
```
|
||||
|
||||
*This resolves conflicts early while keeping history linear.*
|
||||
|
||||
3. Final merge back must be fast-forward only. If not possible, rebase the RC onto `main` before merging.
|
||||
|
||||
## Versioning & Tags
|
||||
|
||||
* Follows [Semantic Versioning](https://semver.org): `MAJOR.MINOR.PATCH`.
|
||||
* RC tags use `-rc.N`, e.g. `v1.8.0-rc.1`.
|
||||
|
||||
## Roles
|
||||
|
||||
| Role | Responsibility |
|
||||
| --------------------------------------- | ----------------------------------------------------------------- |
|
||||
| **Release Captain** (rotates per cycle) | Owns timeline, branch cut, tagging, merge-back. |
|
||||
| **PR Author** | Ensures tests pass; flags PR with `type:release` if needed in RC. |
|
||||
| **CI** | Blocks merges on failing tests or missing labels. |
|
||||
|
||||
## FAQ
|
||||
|
||||
### Do we ever merge main into the RC?
|
||||
|
||||
No. Always rebase the RC onto `main` to preserve linear history.
|
||||
|
||||
### Can we automate branch deletion?
|
||||
|
||||
Not yet — merge-back and cleanup are manual.
|
||||
|
||||
### How flexible is the timeline?
|
||||
|
||||
Very flexible. QA and stabilization phases can be extended as needed for quality.
|
||||
44
SECURITY.md
44
SECURITY.md
|
|
@ -48,7 +48,15 @@ Langflow allows users to define and run **custom code components** through endpo
|
|||
|
||||
This means an attacker could send malicious code to the endpoint and have it executed on the server—leading to full system compromise, including data theft, remote shell access, or lateral movement within the network.
|
||||
|
||||
To address, upgrade to >= 1.3.0.
|
||||
**CVE**: [CVE-2025-3248](https://nvd.nist.gov/vuln/detail/CVE-2025-3248)
|
||||
**Fixed in**: Langflow >= 1.3.0
|
||||
|
||||
### Privilege Escalation via CLI Superuser Creation (Fixed in 1.5.1)
|
||||
|
||||
A privilege escalation vulnerability exists in Langflow containers where an authenticated user with RCE access can invoke the internal CLI command `langflow superuser` to create a new administrative user. This results in full superuser access, even if the user initially registered through the UI as a regular (non-admin) account.
|
||||
|
||||
**CVE**: [CVE-2025-57760](https://github.com/langflow-ai/langflow/security/advisories/GHSA-4gv9-mp8m-592r)
|
||||
**Fixed in**: Langflow >= 1.5.1
|
||||
|
||||
### No API key required if running Langflow with `LANGFLOW_AUTO_LOGIN=true` and `LANGFLOW_SKIP_AUTH_AUTO_LOGIN=true`
|
||||
|
||||
|
|
@ -59,4 +67,36 @@ Setting `LANGFLOW_SKIP_AUTH_AUTO_LOGIN=true` and `LANGFLOW_AUTO_LOGIN=true` skip
|
|||
|
||||
`LANGFLOW_SKIP_AUTH_AUTO_LOGIN=true` is the default behavior, so users do not need to change existing workflows in 1.5. To update your workflows to require authentication, set `LANGFLOW_SKIP_AUTH_AUTO_LOGIN=false`.
|
||||
|
||||
For more information, see [API keys and authentication](https://docs.langflow.org/api-keys-and-authentication).
|
||||
For more information, see [API keys and authentication](https://docs.langflow.org/api-keys-and-authentication).
|
||||
|
||||
## Security Configuration Guidelines
|
||||
|
||||
### Superuser Creation Security
|
||||
|
||||
The `langflow superuser` CLI command can present a privilege escalation risk if not properly secured.
|
||||
|
||||
#### Security Measures
|
||||
|
||||
1. **Authentication Required in Production**
|
||||
- When `LANGFLOW_AUTO_LOGIN=false`, superuser creation requires authentication
|
||||
- Use `--auth-token` parameter with a valid superuser API key or JWT token
|
||||
|
||||
2. **Disable CLI Superuser Creation**
|
||||
- Set `LANGFLOW_ENABLE_SUPERUSER_CLI=false` to disable the command entirely
|
||||
- Strongly recommended for production environments
|
||||
|
||||
3. **Secure AUTO_LOGIN Setting**
|
||||
- Default is `true` for <=1.5. This may change in a future release.
|
||||
- When `true`, creates default superuser `langflow/langflow` - **ONLY USE IN DEVELOPMENT**
|
||||
|
||||
#### Production Security Configuration
|
||||
|
||||
```bash
|
||||
# Recommended production settings
|
||||
export LANGFLOW_AUTO_LOGIN=false
|
||||
export LANGFLOW_ENABLE_SUPERUSER_CLI=false
|
||||
export LANGFLOW_SUPERUSER="<your-superuser-username>"
|
||||
export LANGFLOW_SUPERUSER_PASSWORD="<your-superuser-password>"
|
||||
export LANGFLOW_DATABASE_URL="<your-production-database-url>" # e.g. "postgresql+psycopg://langflow:secure_pass@db.internal:5432/langflow"
|
||||
export LANGFLOW_SECRET_KEY="your-strong-random-secret-key"
|
||||
```
|
||||
|
|
|
|||
|
|
@ -4,10 +4,9 @@
|
|||
* work well for content-centric websites.
|
||||
*/
|
||||
:root {
|
||||
--ifm-background-color: var(--token-primary-bg-c);
|
||||
--ifm-color-primary: hsla(330, 81%, 60%, 1);
|
||||
--ifm-navbar-link-hover-color: initial;
|
||||
--ifm-navbar-padding-vertical: 0;
|
||||
--ifm-global-radius: 16px;
|
||||
--ifm-navbar-item-padding-vertical: 0;
|
||||
--ifm-font-family-base: Inter, -apple-system, BlinkMacSystemFont, Helvetica,
|
||||
Arial, sans-serif, "Apple Color Emoji", "Segoe UI emoji";
|
||||
|
|
@ -15,6 +14,26 @@
|
|||
"Liberation Mono", Menlo, Courier, monospace;
|
||||
}
|
||||
|
||||
/* Light theme - Pure white background */
|
||||
html[data-theme="light"] {
|
||||
--ifm-color-primary: hsla(333, 71%, 51%, 1); /* Slightly darker pink for light theme */
|
||||
--ifm-background-color: var(--ifm-color-white);
|
||||
--ifm-background-surface-color: var(--ifm-color-white);
|
||||
}
|
||||
|
||||
/* Dark theme - Pure black background */
|
||||
html[data-theme="dark"] {
|
||||
--ifm-color-primary: hsla(329, 86%, 70%, 1); /* Lighter pink for dark theme */
|
||||
--ifm-background-color: var(--ifm-color-black);
|
||||
--ifm-background-surface-color: var(--ifm-color-black);
|
||||
}
|
||||
|
||||
/* override the infima navbar docs/node_modules/infima/dist/css/default/default.css */
|
||||
.navbar {
|
||||
box-shadow: none !important;
|
||||
border-bottom: 1px solid var(--ifm-toc-border-color);
|
||||
}
|
||||
|
||||
.theme-doc-sidebar-item-category.menu__list-item:not(:first-child) {
|
||||
margin-top: 0.5rem !important;
|
||||
}
|
||||
|
|
@ -54,6 +73,34 @@ p {
|
|||
text-align: start;
|
||||
}
|
||||
|
||||
/* Tabs Styling */
|
||||
.tabs-container {
|
||||
border: 1px solid var(--ifm-color-emphasis-300);
|
||||
border-radius: var(--ifm-global-radius);
|
||||
padding: 1rem;
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
.tabs {
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
.tabs__item {
|
||||
border: none;
|
||||
border-bottom: 1px solid var(--ifm-color-emphasis-200);
|
||||
margin-right: 0rem;
|
||||
padding-bottom: 0.5rem;
|
||||
border-radius: 0;
|
||||
}
|
||||
|
||||
.tabs__item:hover {
|
||||
background-color: var(--ifm-hover-overlay);
|
||||
}
|
||||
|
||||
.tabs__item--active {
|
||||
border-bottom-color: var(--ifm-tabs-color-active);
|
||||
}
|
||||
|
||||
/* apply */
|
||||
#hero-apply {
|
||||
z-index: -1;
|
||||
|
|
@ -80,21 +127,19 @@ p {
|
|||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Hero component title overrides to match other heading styles
|
||||
*/
|
||||
/* Hero component title overrides to match other heading styles */
|
||||
.hero-title {
|
||||
color: rgb(28, 30, 33);
|
||||
font-family: var(--ifm-heading-font-family);
|
||||
}
|
||||
h1 {
|
||||
font-size: 26px;
|
||||
font-size: 30px;
|
||||
}
|
||||
h2 {
|
||||
font-size: 22px;
|
||||
font-size: 25px;
|
||||
}
|
||||
h3 {
|
||||
font-size: 18px;
|
||||
font-size: 22px;
|
||||
}
|
||||
|
||||
body {
|
||||
|
|
@ -154,7 +199,6 @@ body {
|
|||
}
|
||||
|
||||
/* Discord */
|
||||
|
||||
.header-discord-link {
|
||||
margin-right: 0.5rem;
|
||||
}
|
||||
|
|
@ -279,8 +323,10 @@ body {
|
|||
|
||||
/* Footer Styles */
|
||||
.footer {
|
||||
padding: 8px 0;
|
||||
background-color: var(--ifm-navbar-background-color);
|
||||
/* padding: 8px 0; */
|
||||
padding: 1rem 0 0;
|
||||
background-color: var(--ifm-background-color);
|
||||
border-top: 1px solid var(--ifm-color-emphasis-200);
|
||||
}
|
||||
|
||||
[data-theme="light"] .footer {
|
||||
|
|
@ -317,11 +363,16 @@ body {
|
|||
}
|
||||
|
||||
.footer .container {
|
||||
padding: 0 5rem;
|
||||
padding: 0 1.25rem;
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
justify-content: left;
|
||||
align-items: center;
|
||||
flex-direction: row-reverse;
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
.footer__title {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
/* Sidebar Styles */
|
||||
|
|
@ -416,3 +467,21 @@ body {
|
|||
padding: 0;
|
||||
}
|
||||
|
||||
/* DocSearch Input Styling - Simple border override */
|
||||
.DocSearch-Button {
|
||||
border: 1px solid var(--ifm-color-emphasis-300) !important;
|
||||
border-radius: 6px !important;
|
||||
background: var(--ifm-color-content-inverse) !important;
|
||||
color: var(--ifm-color-emphasis-500) !important;
|
||||
}
|
||||
|
||||
.DocSearch-Button:hover,
|
||||
.DocSearch-Button:focus {
|
||||
border-color: var(--ifm-color-primary) !important;
|
||||
box-shadow: 0 0 0 1px var(--ifm-color-primary) !important;
|
||||
}
|
||||
|
||||
.DocSearch-Search-Icon {
|
||||
color: var(--ifm-color-emphasis-500) !important;
|
||||
size: 16px !important;
|
||||
}
|
||||
|
|
@ -419,4 +419,8 @@ curl -X DELETE \
|
|||
|
||||
## Create upload file (Deprecated)
|
||||
|
||||
This endpoint is deprecated. Use the `/files` endpoints instead.
|
||||
This endpoint is deprecated. Use the `/files` endpoints instead.
|
||||
|
||||
## See also
|
||||
|
||||
* [Manage files](/concepts-file-management)
|
||||
|
|
@ -6,9 +6,26 @@ slug: /api-monitor
|
|||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
Use the `/monitor` endpoint to monitor and modify messages passed between Langflow components, vertex builds, and transactions.
|
||||
The `/monitor` endpoints are for internal Langflow functionality, primarily related to running flows in the **Playground**, storing chat history, and generating flow logs.
|
||||
|
||||
## Get Vertex builds
|
||||
This information is primarily for those who are building custom components or contributing to the Langflow codebase in a way that requires calling or understanding these endpoints.
|
||||
|
||||
For typical application development with Langflow, there are more appropriate options for monitoring, debugging, and memory management.
|
||||
For more information, see the following:
|
||||
|
||||
* [Logs](/logging): Langflow log storage locations, customization options, and where to view logs in the visual editor
|
||||
* [Test flows in the Playground](/concepts-playground): Run flows and inspect message history
|
||||
* [Memory management options](/memory): Langflow storage locations and options, including the database, cache, and chat history
|
||||
|
||||
## Vertex builds
|
||||
|
||||
The Vertex build endpoints (`/monitor/builds`) are exclusively for **Playground** functionality.
|
||||
|
||||
When you run a flow in the **Playground**, Langflow calls the `/build/$FLOW_ID/flow` endpoint in [chat.py](https://github.com/langflow-ai/langflow/blob/main/src/backend/base/langflow/api/v1/chat.py#L143). This call retrieves the flow data, builds a graph, and executes the graph. As each component (or node) is executed, the `build_vertex` function calls `build_and_run`, which may call the individual components' `def_build` method, if it exists. If a component doesn't have a `def_build` function, the build still returns a component.
|
||||
|
||||
The `build` function allows components to execute logic at runtime. For example, the [**Recursive Character Text Splitter** component](https://github.com/langflow-ai/langflow/blob/main/src/backend/base/langflow/components/langchain_utilities/recursive_character.py) is a child of the `LCTextSplitterComponent` class. When text needs to be processed, the parent class's `build` method is called, which creates a `RecursiveCharacterTextSplitter` object and uses it to split the text according to the defined parameters. The split text is then passed on to the next component. This all occurs when the component is built.
|
||||
|
||||
### Get Vertex builds
|
||||
|
||||
Retrieve Vertex builds for a specific flow.
|
||||
|
||||
|
|
@ -384,7 +401,7 @@ curl -X GET \
|
|||
|
||||
</details>
|
||||
|
||||
## Delete Vertex builds
|
||||
### Delete Vertex builds
|
||||
|
||||
Delete Vertex builds for a specific flow.
|
||||
|
||||
|
|
@ -404,7 +421,12 @@ curl -X DELETE \
|
|||
|
||||
</details>
|
||||
|
||||
## Get messages
|
||||
## Messages endpoints
|
||||
|
||||
The `/monitor/messages` endpoints store, retrieve, edit, and delete records in the message table in [`langflow.db`](/memory)
|
||||
Typically, these are called implicitly when running flows that produce message history, or when inspecting and modifying **Playground** memories.
|
||||
|
||||
### Get messages
|
||||
|
||||
Retrieve a list of all messages:
|
||||
|
||||
|
|
@ -466,7 +488,7 @@ curl -X GET \
|
|||
|
||||
</details>
|
||||
|
||||
## Delete messages
|
||||
### Delete messages
|
||||
|
||||
Delete specific messages by their IDs.
|
||||
|
||||
|
|
@ -490,7 +512,7 @@ curl -v -X DELETE \
|
|||
|
||||
</details>
|
||||
|
||||
## Update message
|
||||
### Update message
|
||||
|
||||
Update a specific message by its ID.
|
||||
|
||||
|
|
@ -540,7 +562,7 @@ curl -X PUT \
|
|||
|
||||
</details>
|
||||
|
||||
## Update session ID
|
||||
### Update session ID
|
||||
|
||||
Update the session ID for messages.
|
||||
|
||||
|
|
@ -591,7 +613,7 @@ curl -X PATCH \
|
|||
|
||||
</details>
|
||||
|
||||
## Delete messages by session
|
||||
### Delete messages by session
|
||||
|
||||
Delete all messages for a specific session.
|
||||
|
||||
|
|
@ -614,6 +636,7 @@ HTTP/1.1 204 No Content
|
|||
## Get transactions
|
||||
|
||||
Retrieve all transactions, which are interactions between components, for a specific flow.
|
||||
This information is also available in [flow logs](/logging).
|
||||
|
||||
```bash
|
||||
curl -X GET \
|
||||
|
|
@ -651,4 +674,5 @@ curl -X GET \
|
|||
|
||||
## See also
|
||||
|
||||
- [Use voice mode](/concepts-voice-mode)
|
||||
- [Session ID](/session-id)
|
||||
|
|
@ -186,9 +186,198 @@ curl -X GET \
|
|||
-H "x-api-key: $LANGFLOW_API_KEY"
|
||||
```
|
||||
|
||||
## Available endpoints
|
||||
|
||||
Because you can run Langflow as either an IDE (frontend and backend) or a runtime (headless, backend-only), it serves endpoints that support frontend and backend operations.
|
||||
Many endpoints are for orchestration between the frontend and backend, reading and writing to the Langflow database, or enabling frontend functionality, like the **Playground**.
|
||||
Unless you are contributing to the Langflow codebase, you won't directly call most of the Langflow endpoints.
|
||||
|
||||
For application development, the most commonly used endpoints are the `/run` and `/webhook` [flow trigger endpoints](/api-flows-run).
|
||||
For some use cases, you might use some other endpoints, such as the `/files` endpoints to use files in flows.
|
||||
|
||||
To help you explore the available endpoints, the following lists are sorted by primary use case, although some endpoints might support multiple use cases.
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="Application development" label="Application development" default>
|
||||
|
||||
The following endpoints are useful for developing applications with Langflow and administering Langflow deployments with one or more users.
|
||||
You will most often use the flow trigger endpoints.
|
||||
Other endpoints are helpful for specific use cases, such as administration and flow management in runtime deployments that don't have a visual editor.
|
||||
|
||||
* [Flow trigger endpoints](/api-flows-run):
|
||||
* POST `/v1/run/{flow_id_or_name}`: Run a flow.
|
||||
* POST `/v1/run/advanced/{flow_id}`: Advanced run with explicit `inputs`, `outputs`, `tweaks`, and optional `session_id`.
|
||||
* POST `/v1/webhook/{flow_id_or_name}`: Trigger a flow via webhook payload.
|
||||
|
||||
* Deployment details:
|
||||
* GET `/v1/version`: Return Langflow version. See [Get version](/api-reference-api-examples#get-version).
|
||||
* GET `/v1/config`: Return deployment configuration. See [Get configuration](/api-reference-api-examples#get-configuration).
|
||||
|
||||
* [Projects endpoints](/api-projects):
|
||||
* POST `/v1/projects/`: Create a project.
|
||||
* GET `/v1/projects/`: List projects.
|
||||
* GET `/v1/projects/{project_id}`: Read a project (with paginated flows support).
|
||||
* PATCH `/v1/projects/{project_id}`: Update project info and membership.
|
||||
* DELETE `/v1/projects/{project_id}`: Delete a project.
|
||||
* GET `/v1/projects/download/{project_id}`: Export all flows in a project as ZIP.
|
||||
* POST `/v1/projects/upload/`: Import a project ZIP (creates project and flows).
|
||||
* GET `/v1/starter-projects/`: Return a list of templates.
|
||||
|
||||
* [Files endpoints](/api-files):
|
||||
* Files (v1)
|
||||
* POST `/v1/files/upload/{flow_id}`: Upload a file to a specific flow.
|
||||
* GET `/v1/files/download/{flow_id}/{file_name}`: Download a file from a flow.
|
||||
* GET `/v1/files/images/{flow_id}/{file_name}`: Stream an image from a flow.
|
||||
* GET `/v1/files/profile_pictures/{folder_name}/{file_name}`: Get a profile picture asset.
|
||||
* GET `/v1/files/profile_pictures/list`: List available profile picture assets.
|
||||
* GET `/v1/files/list/{flow_id}`: List files for a flow.
|
||||
* DELETE `/v1/files/delete/{flow_id}/{file_name}`: Delete a file from a flow.
|
||||
* Files (v2)
|
||||
* POST `/v2/files` (alias `/v2/files/`): Upload a file owned by the current user.
|
||||
* GET `/v2/files` (alias `/v2/files/`): List files owned by the current user.
|
||||
* DELETE `/v2/files/batch/`: Delete multiple files by IDs.
|
||||
* POST `/v2/files/batch/`: Download multiple files as a ZIP by IDs.
|
||||
* GET `/v2/files/{file_id}`: Download a file by ID (or return raw content internally).
|
||||
* PUT `/v2/files/{file_id}`: Edit a file name by ID.
|
||||
* DELETE `/v2/files/{file_id}`: Delete a file by ID.
|
||||
* DELETE `/v2/files` (alias `/v2/files/`): Delete all files for the current user.
|
||||
|
||||
* [API keys and authentication](/api-keys-and-authentication):
|
||||
* GET `/v1/api_key/`: List API keys for the current user.
|
||||
* POST `/v1/api_key/`: Create a new API key.
|
||||
* DELETE `/v1/api_key/{api_key_id}`: Delete an API key.
|
||||
* POST `/v1/api_key/store`: Save an encrypted Store API key (cookie set).
|
||||
|
||||
* [Flow management endpoints](/api-flows):
|
||||
* POST `/v1/flows/`: Create a flow.
|
||||
* GET `/v1/flows/`: List flows (supports pagination and filters).
|
||||
* GET `/v1/flows/{flow_id}`: Read a flow by ID.
|
||||
* GET `/v1/flows/public_flow/{flow_id}`: Read a public flow by ID.
|
||||
* PATCH `/v1/flows/{flow_id}`: Update a flow.
|
||||
* DELETE `/v1/flows/{flow_id}`: Delete a flow.
|
||||
* POST `/v1/flows/batch/`: Create multiple flows.
|
||||
* POST `/v1/flows/upload/`: Import flows from a JSON file.
|
||||
* DELETE `/v1/flows/`: Delete multiple flows by IDs.
|
||||
* POST `/v1/flows/download/`: Export flows to a ZIP file.
|
||||
* GET `/v1/flows/basic_examples/`: List basic example flows.
|
||||
|
||||
* [Users endpoints](/api-users):
|
||||
* POST `/v1/users/`: Add a user (superuser required when auth enabled).
|
||||
* GET `/v1/users/whoami`: Return the current authenticated user.
|
||||
* GET `/v1/users/`: List all users (superuser required).
|
||||
* PATCH `/v1/users/{user_id}`: Update a user (with role checks).
|
||||
* PATCH `/v1/users/{user_id}/reset-password`: Reset own password.
|
||||
* DELETE `/v1/users/{user_id}`: Delete a user (cannot delete yourself).
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="Custom components" label="Custom components">
|
||||
|
||||
You might use these endpoints when developing custom Langflow components for your own use or to share with the Langflow community:
|
||||
|
||||
* Develop custom components:
|
||||
* GET `/v1/all`: Return all available Langflow component types. See [Get all components](/api-reference-api-examples#get-all-components).
|
||||
* POST `/v1/custom_component`: Build a custom component from code and return its node.
|
||||
* POST `/v1/custom_component/update`: Update an existing custom component's build config and outputs.
|
||||
* POST `/v1/validate/code`: Validate a Python code snippet for a custom component.
|
||||
|
||||
* Langflow Store:
|
||||
* GET `/v1/store/check/`: Return whether the Store feature is enabled.
|
||||
* GET `/v1/store/check/api_key`: Check if a Store API key exists and is valid.
|
||||
* POST `/v1/store/components/`: Share a component to the Store.
|
||||
* PATCH `/v1/store/components/{component_id}`: Update a shared component.
|
||||
* GET `/v1/store/components/`: List available Store components (filters supported).
|
||||
* GET `/v1/store/components/{component_id}`: Download a component from the Store.
|
||||
* GET `/v1/store/tags`: List Store tags.
|
||||
* GET `/v1/store/users/likes`: List components liked by the current user.
|
||||
* POST `/v1/store/users/likes/{component_id}`: Like a component.
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="MCP" label="MCP servers and clients">
|
||||
|
||||
The following endpoints are for managing Langflow MCP servers, both Langflow-hosted MCP servers and external MCP server connections:
|
||||
|
||||
* **MCP (global)**:
|
||||
* HEAD `/v1/mcp/sse`: Health check for MCP SSE.
|
||||
* GET `/v1/mcp/sse`: Open SSE stream for MCP server events.
|
||||
* POST `/v1/mcp/`: Post messages to the MCP server.
|
||||
|
||||
* **MCP (project-specific)**:
|
||||
* GET `/v1/mcp/project/{project_id}`: List MCP-enabled tools and project auth settings.
|
||||
* HEAD `/v1/mcp/project/{project_id}/sse`: Health check for project SSE.
|
||||
* GET `/v1/mcp/project/{project_id}/sse`: Open project-scoped MCP SSE.
|
||||
* POST `/v1/mcp/project/{project_id}`: Post messages to project MCP server.
|
||||
* POST `/v1/mcp/project/{project_id}/` (trailing slash): Same as above.
|
||||
* PATCH `/v1/mcp/project/{project_id}`: Update MCP settings for flows and project auth settings.
|
||||
* POST `/v1/mcp/project/{project_id}/install`: Install MCP client config for Cursor/Windsurf/Claude (local only).
|
||||
* GET `/v1/mcp/project/{project_id}/installed`: Check which clients have MCP config installed.
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="Codebase contribution" label="Codebase development">
|
||||
|
||||
The following endpoints are most often used when contributing to the Langflow codebase, and you need to understand or call endpoints that support frontend-to-backend orchestration or other internal functionality.
|
||||
|
||||
* Base (metadata):
|
||||
* GET `/v1/all`: Return all available Langflow component types. See [Get all components](/api-reference-api-examples#get-all-components).
|
||||
* GET `/v1/version`: Return Langflow version. See [Get version](/api-reference-api-examples#get-version).
|
||||
* GET `/v1/config`: Return deployment configuration. See [Get configuration](/api-reference-api-examples#get-configuration).
|
||||
* GET `/v1/starter-projects/`: Return a list of templates.
|
||||
|
||||
* [Build endpoints](/api-build) (internal editor support):
|
||||
* POST `/v1/build/{flow_id}/flow`: Start a flow build and return a job ID.
|
||||
* GET `/v1/build/{job_id}/events`: Stream or fetch build events.
|
||||
* POST `/v1/build/{job_id}/cancel`: Cancel a build job.
|
||||
* POST `/v1/build_public_tmp/{flow_id}/flow`: Build a public flow without auth.
|
||||
* POST `/v1/validate/prompt`: Validate a prompt payload.
|
||||
|
||||
* [API keys and authentication](/api-keys-and-authentication):
|
||||
* POST `/v1/login`: Login and set tokens as cookies.
|
||||
* GET `/v1/auto_login`: Auto-login (if enabled) and set tokens.
|
||||
* POST `/v1/refresh`: Refresh tokens using refresh cookie.
|
||||
* POST `/v1/logout`: Logout and clear cookies.
|
||||
|
||||
* [Monitor endpoints](/api-monitor):
|
||||
* GET `/v1/monitor/builds`: Get vertex builds for a flow.
|
||||
* DELETE `/v1/monitor/builds`: Delete vertex builds for a flow.
|
||||
* GET `/v1/monitor/messages/sessions`: List message session IDs (auth required).
|
||||
* GET `/v1/monitor/messages`: List messages with optional filters.
|
||||
* DELETE `/v1/monitor/messages`: Delete messages by IDs (auth required).
|
||||
* PUT `/v1/monitor/messages/{message_id}`: Update a message.
|
||||
* PATCH `/v1/monitor/messages/session/{old_session_id}`: Change a session ID for all messages in that session.
|
||||
* DELETE `/v1/monitor/messages/session/{session_id}`: Delete messages by session.
|
||||
* GET `/v1/monitor/transactions`: List transactions for a flow (paginated).
|
||||
|
||||
* Variables:
|
||||
* POST `/v1/variables/`: Create a variable, such as an API key, for the user.
|
||||
* GET `/v1/variables/`: List variables for the user.
|
||||
* PATCH `/v1/variables/{variable_id}`: Update a variable.
|
||||
* DELETE `/v1/variables/{variable_id}`: Delete a variable.
|
||||
|
||||
* [Use voice mode](/concepts-voice-mode):
|
||||
* WS `/v1/voice/ws/flow_as_tool/{flow_id}`: Bi-directional voice session exposing the flow as a tool.
|
||||
* WS `/v1/voice/ws/flow_as_tool/{flow_id}/{session_id}`: Same as above with explicit session ID.
|
||||
* WS `/v1/voice/ws/flow_tts/{flow_id}`: Voice-to-text session that runs a flow and returns TTS.
|
||||
* WS `/v1/voice/ws/flow_tts/{flow_id}/{session_id}`: Same as above with explicit session ID.
|
||||
* GET `/v1/voice/elevenlabs/voice_ids`: List available ElevenLabs voice IDs for the user.
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="Deprecated" label="Deprecated">
|
||||
|
||||
The following endpoints are deprecated:
|
||||
|
||||
* POST `/v1/predict/{flow_id}`: Use [`/v1/run/{flow_id}`](/api-flows-run) instead.
|
||||
* POST `/v1/process/{flow_id}`: Use [`/v1/run/{flow_id}`](/api-flows-run) instead.
|
||||
* GET `/v1/task/{task_id}`: Deprecated functionality.
|
||||
* POST `/v1/upload/{flow_id}`: Use [`/files`](/api-files) instead.
|
||||
* POST `/v1/build/{flow_id}/vertices`: Replaced by [`/monitor/builds`](/api-monitor).
|
||||
* POST `/v1/build/{flow_id}/vertices/{vertex_id}`: Replaced by [`/monitor/builds`](/api-monitor).
|
||||
* GET `/v1/build/{flow_id}/{vertex_id}/stream`: Replaced by [`/monitor/builds`](/api-monitor).
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Next steps
|
||||
|
||||
- Use the Langflow API to [run a flow](/api-flows-run).
|
||||
- Use the Langflow API to [upload files](/api-files).
|
||||
- Use the Langflow API to [get flow logs](/api-logs).
|
||||
- Explore all endpoints in the [Langflow API specification](/api).
|
||||
* Use the Langflow API to [run a flow](/api-flows-run).
|
||||
* Use the Langflow API to [upload files](/api-files).
|
||||
* Use the Langflow API to [get flow logs](/api-logs).
|
||||
* Explore all endpoints in the [Langflow API specification](/api).
|
||||
|
|
@ -44,6 +44,12 @@ For more information about using embedding model components in flows, see [**Emb
|
|||
| temperature | Float | Input parameter. The model temperature for embedding generation. Default: `0.1`. |
|
||||
| embeddings | Embeddings | Output parameter. An `NVIDIAEmbeddings` instance for generating embeddings. |
|
||||
|
||||
:::tip Tokenization considerations
|
||||
Be aware of your embedding model's chunk size limit.
|
||||
Tokenization errors can occur if your text chunks are too large.
|
||||
For more information, see [Tokenization errors due to chunk size](/components-processing#chunk-size).
|
||||
:::
|
||||
|
||||
## NVIDIA Rerank
|
||||
|
||||
This component finds and reranks documents using the NVIDIA API.
|
||||
|
|
@ -53,7 +59,7 @@ This component finds and reranks documents using the NVIDIA API.
|
|||
This component uses the NVIDIA `nv-ingest` microservice for data ingestion, processing, and extraction of text files.
|
||||
For more information, see [Integrate NVIDIA Retriever Extraction with Langflow](/integrations-nvidia-ingest).
|
||||
|
||||
## NVIDIA System-Assist
|
||||
## NVIDIA G-Assist
|
||||
|
||||
This component requires a specific system environment.
|
||||
For information about this component, see [Integrate NVIDIA G-Assist with Langflow](/integrations-nvidia-g-assist).
|
||||
|
|
@ -29,9 +29,8 @@ You can toggle parameters through the <Icon name="SlidersHorizontal" aria-hidden
|
|||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| model_name | String | Input parameter. The name of the Perplexity model to use. Options include various Llama 3.1 models. |
|
||||
| max_output_tokens | Integer | Input parameter. The maximum number of tokens to generate. |
|
||||
| max_tokens | Integer | Input parameter. The maximum number of tokens to generate. |
|
||||
| api_key | SecretString | Input parameter. The Perplexity API Key for authentication. |
|
||||
| temperature | Float | Input parameter. Controls randomness in the output. Default: 0.75. |
|
||||
| top_p | Float | Input parameter. The maximum cumulative probability of tokens to consider when sampling (advanced). |
|
||||
| n | Integer | Input parameter. Number of chat completions to generate for each prompt (advanced). |
|
||||
| top_k | Integer | Input parameter. Number of top tokens to consider for top-k sampling. Must be positive (advanced). |
|
||||
| n | Integer | Input parameter. Number of chat completions to generate for each prompt (advanced). |
|
||||
|
|
@ -43,7 +43,7 @@ For examples of flows using the **Agent** and **MCP Tools** components, see the
|
|||
The **Agent** component is the primary agent actor in your agent flows.
|
||||
This component uses an LLM integration to respond to input, such as a chat message or file upload.
|
||||
|
||||
The agent can use the tools already available in the base LLM model as well as additional tools that you connect to the **Agent** component's **Tools** port.
|
||||
The agent can use the tools already available in the base LLM as well as additional tools that you connect to the **Agent** component's **Tools** port.
|
||||
You can connect any Langflow component as a tool, including other **Agent** components and MCP servers through the [**MCP Tools** component](#mcp-connection).
|
||||
|
||||
For more information about using this component, see [Use Langflow agents](/agents).
|
||||
|
|
|
|||
|
|
@ -243,9 +243,13 @@ By default, Langflow looks for custom components in the `/components` directory.
|
|||
|
||||
If you're creating custom components in a different location using the `LANGFLOW_COMPONENTS_PATH` [environment variable](/environment-variables), components must be organized in a specific directory structure to be properly loaded and displayed in the visual editor:
|
||||
|
||||
Each category directory **must** contain an `__init__.py` file for Langflow to properly recognize and load the components.
|
||||
This is a Python package requirement that ensures the directory is treated as a module.
|
||||
|
||||
```
|
||||
/your/custom/components/path/ # Base directory set by LANGFLOW_COMPONENTS_PATH
|
||||
└── category_name/ # Required category subfolder that determines menu name
|
||||
├── __init__.py # Required
|
||||
└── custom_component.py # Component file
|
||||
```
|
||||
|
||||
|
|
@ -257,6 +261,7 @@ For example, to add a component to the **Helpers** category, place it in the `he
|
|||
```
|
||||
/app/custom_components/ # LANGFLOW_COMPONENTS_PATH
|
||||
└── helpers/ # Displayed within the "Helpers" category
|
||||
├── __init__.py # Required
|
||||
└── custom_component.py # Your component
|
||||
```
|
||||
|
||||
|
|
@ -264,8 +269,10 @@ You can have multiple category folders to organize components into different cat
|
|||
```
|
||||
/app/custom_components/
|
||||
├── helpers/
|
||||
│ ├── __init__.py
|
||||
│ └── helper_component.py
|
||||
└── tools/
|
||||
├── __init__.py
|
||||
└── tool_component.py
|
||||
```
|
||||
|
||||
|
|
|
|||
|
|
@ -396,7 +396,7 @@ There are two settings that control the output of the **URL** component at diffe
|
|||
When used as a standard component in a flow, the **URL** component must be connected to a component that accepts the selected output data type (`DataFrame` or `Message`).
|
||||
You can connect the **URL** component directly to a compatible component, or you can use a [**Type Convert** component](/components-processing#type-convert) to convert the output to another type before passing the data to other components if the data types aren't directly compatible.
|
||||
|
||||
Processing components, like the **Type Convert** component, are useful with the **URL** component because it can extract a large amount of data from the crawled pages.
|
||||
**Processing** components like the **Type Convert** component are useful with the **URL** component because it can extract a large amount of data from the crawled pages.
|
||||
For example, if you only want to pass specific fields to other components, you can use a [**Parser** component](/components-processing#parser) to extract only that data from the crawled pages before passing the data to other components.
|
||||
|
||||
When used in **Tool Mode** with an **Agent** component, the **URL** component can be connected directly to the **Agent** component's **Tools** port without converting the data.
|
||||
|
|
|
|||
|
|
@ -8,9 +8,7 @@ import Icon from "@site/src/components/icon";
|
|||
**Embedding Model** components in Langflow generate text embeddings using a specified Large Language Model (LLM).
|
||||
|
||||
Langflow includes an **Embedding Model** core component that has built-in support for some LLMs.
|
||||
Alternatively, you can use any [additional **Embedding Model** component](#additional-embedding-model-components) in place of the core **Embedding Model** component.
|
||||
|
||||
The built-in LLMs are appropriate for most text-based embedding model use cases in Langflow.
|
||||
Alternatively, you can use [additional embedding models](#additional-embedding-model-components) in place of the core **Embedding Model** component.
|
||||
|
||||
## Use Embedding Model components in a flow
|
||||
|
||||
|
|
@ -21,22 +19,16 @@ This flow loads a text file, splits the text into chunks, generates embeddings f
|
|||
|
||||

|
||||
|
||||
:::tip
|
||||
This example uses the **Embedding Model** core component.
|
||||
|
||||
To use another model, you can replace the **Embedding Model** core component with any [additional **Embedding Model** component](#additional-embedding-model-components) in these steps.
|
||||
However, your component might have different parameters than the **Embedding Model** core component.
|
||||
:::
|
||||
|
||||
1. Create a flow, add a **File** component, and then select a file containing text data, such as a PDF, that you can use to test the flow.
|
||||
|
||||
2. Add an **Embedding Model** component, and then provide a valid OpenAI API key.
|
||||
You can enter component API keys directly or use Langflow global variables to reference your API keys.
|
||||
|
||||
By default, the **Embedding Model** component uses an OpenAI model.
|
||||
If you want to use a different model, edit the **Model Name** and **API Key** fields accordingly.
|
||||
Or, see [Additional Embedding Model components](#additional-embedding-model-components) for other components that you can use in place of the **Embedding Model** core component.
|
||||
:::tip
|
||||
If your preferred embedding model provider or model isn't supported by the **Embedding Model** core component, you can use [additional embedding models](#additional-embedding-model-components) in place of the core component.
|
||||
|
||||
You can enter component API keys directly or use Langflow global variables to reference your API keys.
|
||||
Search the **Components** menu for your preferred provider to find additional embedding models, such as the [**Hugging Face Embeddings Inference** component](/bundles-huggingface#hugging-face-embeddings-inference).
|
||||
:::
|
||||
|
||||
3. Add a [**Split Text** component](/components-processing#split-text) to your flow.
|
||||
This component splits text input into smaller chunks to be processed into embeddings.
|
||||
|
|
@ -76,9 +68,9 @@ You can toggle parameters through the <Icon name="SlidersHorizontal" aria-hidden
|
|||
| model_kwargs | Model Kwargs | Dictionary | Input parameter. Additional keyword arguments to pass to the model. |
|
||||
| embeddings | Embeddings | Embeddings | Output parameter. An instance for generating embeddings using the selected provider. |
|
||||
|
||||
## Additional Embedding Model components
|
||||
## Additional embedding models {#additional-embedding-model-components}
|
||||
|
||||
If your provider or model isn't supported by the **Embedding Model** core component, additional single-provider **Embedding Model** components are available in the [**Bundles**](/components-bundle-components) section of the **Components** menu.
|
||||
If your provider or model isn't supported by the **Embedding Model** core component, additional provider-specific **Embedding Model** components are available in the [**Bundles**](/components-bundle-components) section of the **Components** menu.
|
||||
|
||||
## Legacy embedding components
|
||||
|
||||
|
|
|
|||
|
|
@ -33,7 +33,10 @@ The following example uses the **If-Else** component to check incoming chat mess
|
|||
|
||||
1. Add an **If-Else** component to your flow, and then configure it as follows:
|
||||
|
||||
* **Text Input**: Connect the **Text Input** port to a **Chat Input** component.
|
||||
* **Text Input**: Connect the **Text Input** port to a **Chat Input** component or another `Message` input.
|
||||
|
||||
If your input isn't in `Message` format, you can use another component to transform it, such as the [**Type Convert** component](/components-processing#type-convert) or [**Parser** component](/components-processing#parser).
|
||||
If your input isn't appropriate for `Message` format, consider using another component for conditional routing, such as the [**Data Operations** component](/components-processing#data-operations).
|
||||
|
||||
* **Match Text**: Enter `.*(urgent|warning|caution).*` so the component looks for these values in incoming input. The regex match is case sensitive, so if you need to look for all permutations of `warning`, enter `warning|Warning|WARNING`.
|
||||
|
||||
|
|
@ -96,7 +99,10 @@ You can toggle parameters through the <Icon name="SlidersHorizontal" aria-hidden
|
|||
|
||||
## Loop
|
||||
|
||||
The **Loop** component iterates over a list of input by passing individual items to other components attached at the **Item** output port until there are no items left to process. Then, the **Loop** component passes the aggregated result of all looping to the component connected to the **Done** port.
|
||||
The **Loop** component iterates over a list of input by passing individual items to other components attached at the **Item** output port until there are no items left to process.
|
||||
Then, the **Loop** component passes the aggregated result of all looping to the component connected to the **Done** port.
|
||||
|
||||
### The looping process
|
||||
|
||||
The **Loop** component is like a miniature flow within your flow.
|
||||
Here's a breakdown of the looping process:
|
||||
|
|
@ -115,9 +121,13 @@ Here's a breakdown of the looping process:
|
|||
|
||||
Only one component connects to the **Item** port, but you can pass the data through as many components as you need, as long as the last component in the chain connects back to the **Looping** port.
|
||||
|
||||
The **If-Else** component isn't compatible with the **Loop** component.
|
||||
For more information, see [Conditional looping](#conditional-looping).
|
||||
|
||||
4. After processing all items, the results are aggregated into a single `Data` object that is passed from the **Loop** component's **Done** port to the next component in the flow.
|
||||
|
||||
In terms of simplified code, the **Loop** component works like this:
|
||||
The following simplified Python code summarizes how the **Loop** component works.
|
||||
This _isn't_ the actual component code; it is only meant to help you understand the general process.
|
||||
|
||||
```python
|
||||
for i in input: # Receive input data as a list
|
||||
|
|
@ -132,8 +142,7 @@ done = aggregate_results() # Compile all returned items
|
|||
print(done) # Send the aggregated results from the Done port to another component
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>Loop example</summary>
|
||||
### Loop example
|
||||
|
||||
In the following example, the **Loop** component iterates over a CSV file until there are no rows left to process.
|
||||
In this case, the **Item** port passes each row to a **Type Convert** component, which converts the row into a `Message` object, and then passes the `Message` to a **Structured Output** component to be processed into structured data that is then passed back to the **Loop** component's **Looping** port.
|
||||
|
|
@ -145,7 +154,13 @@ After processing all rows, the **Loop** component loads the aggregated list of s
|
|||
For more examples of the **Loop** component, try the **Research Translation Loop** template in Langflow, or see the video tutorial [Mastering the Loop Component & Agentic RAG in Langflow](https://www.youtube.com/watch?v=9Wx7WODSKTo).
|
||||
:::
|
||||
|
||||
</details>
|
||||
### Conditional looping
|
||||
|
||||
The **If-Else** component isn't compatible with the **Loop** component.
|
||||
If you need conditional loop events, redesign your flow to process conditions before the loop.
|
||||
For example, if you are looping over a `DataFrame`, you could use multiple [**DataFrame Operations** components](/components-processing#dataframe-operations) to conditionally filter data, and then run separate loops on each set of filtered data.
|
||||
|
||||

|
||||
|
||||
## Notify and Listen
|
||||
|
||||
|
|
|
|||
|
|
@ -7,8 +7,8 @@ import Icon from "@site/src/components/icon";
|
|||
|
||||
**Language Model** components in Langflow generate text using a specified Large Language Model (LLM).
|
||||
|
||||
Langflow includes a **Language Model** core component that has built-in support for many LLMs, as well as an interface to connect any [additional **Language Model** component](#additional-language-model-components).
|
||||
The built-in LLMs are appropriate for most text-based language model use cases in Langflow.
|
||||
Langflow includes a **Language Model** core component that has built-in support for many LLMs.
|
||||
Alternatively, you can use any [additional language model](#additional-language-models) in place of the core **Language Model** component.
|
||||
|
||||
## Use Language Model components in a flow
|
||||
|
||||
|
|
@ -18,19 +18,20 @@ These components accept inputs like chat messages, files, and instructions in or
|
|||
The flow must include [**Chat Input and Output** components](/components-io#chat-io) to allow chat-based interactions with the LLM.
|
||||
However, you can also use the **Language Model** component for actions that don't emit chat output directly, such as the **Smart Function** component.
|
||||
|
||||
The following example uses the **Language Model** core component and a built-in LLM to create a chatbot flow similar to the **Basic Prompting** template.
|
||||
The example focuses on using the built-in models, but it also indicates where you can integrate another model.
|
||||
The following example uses the **Language Model** core component to create a chatbot flow similar to the **Basic Prompting** template.
|
||||
It also explains how you can replace the core component with another LLM.
|
||||
|
||||
1. Add the **Language Model** component to your flow.
|
||||
|
||||
2. In the **OpenAI API Key** field, enter your OpenAI API key.
|
||||
|
||||
This example uses the default OpenAI model and a built-in Anthropic model to compare responses from different providers.
|
||||
|
||||
If you want to use a different provider, edit the **Model Provider**, **Model Name**, and **API Key** fields accordingly.
|
||||
|
||||
If you want to use a provider or model that isn't built-in to the **Language Model** core component, see [Additional Language Model components](#additional-language-model-components) to learn how to connect a **Custom** model provider to the **Language Model** component.
|
||||
:::tip My preferred provider or model isn't listed
|
||||
If you want to use a provider or model that isn't built-in to the **Language Model** core component, you can replace this component with another compatible component, as explained in [Additional language models](#additional-language-models).
|
||||
Then, you can continue following these steps to build your flow.
|
||||
:::
|
||||
|
||||
3. In the [component's header menu](/concepts-components#component-menus), click <Icon name="SlidersHorizontal" aria-hidden="true"/> **Controls**, enable the **System Message** parameter, and then click **Close**.
|
||||
|
||||
|
|
@ -118,24 +119,27 @@ This is a specific data type that is only required by certain components, such a
|
|||
With this configuration, the **Language Model** component is meant to support an action completed by another component, rather than producing a text response for a standard chat-based interaction.
|
||||
For an example, the **Smart Function** component uses an LLM to create a function from natural language input.
|
||||
|
||||
## Additional Language Model components
|
||||
## Additional language models
|
||||
|
||||
If your provider or model isn't supported by the **Language Model** core component, additional single-provider **Language Model** components are available in the [**Bundles**](/components-bundle-components) section of the **Components** menu.
|
||||
If your provider or model isn't supported by the **Language Model** core component, additional provider-specific models are available in the [**Bundles**](/components-bundle-components) section of the **Components** menu.
|
||||
|
||||
You can use bundled components directly in your flows or you can connect them to other components that accept a [`LanguageModel`](/data-types#languagemodel) input, such as the **Language Model** and **Agent** components.
|
||||
You can use these provider-specific components directly in your flows in the same place that you would use the **Language Model** core component.
|
||||
Or, you can connect them to other components that accept a [`LanguageModel`](/data-types#languagemodel) input, such as the **Smart Function** and **Agent** components.
|
||||
|
||||
For example, to connect bundled components to the **Language Model** core component, do the following:
|
||||
For example, to connect a provider-specific component to the **Agent** component, do the following:
|
||||
|
||||
1. In the **Language Model** component, set **Model Provider** to **Custom**.
|
||||
1. In the **Components** menu, search for your preferred model provider, and then add the provider's LLM component to your flow.
|
||||
The component may not have `model` in the name.
|
||||
For example, Azure OpenAI LLMs are in the [**Azure OpenAI** component](/bundles-azure#azure-openai).
|
||||
|
||||
The field name changes to **Language Model** and the input port changes to a `LanguageModel` port.
|
||||
2. Configure the LLM component as needed to connect to your preferred model.
|
||||
|
||||
2. Add a compatible bundled component to your flow, such as the [**Vertex AI** component for text generation](/bundles-vertexai).
|
||||
|
||||
3. Change the bundled component's output type to `LanguageModel`.
|
||||
To do this, click **Model Response** near the component's output port, and then select **Language Model**.
|
||||
3. Change the LLM component's output type from **Model Response** to **Language Model**.
|
||||
The output port changes to a `LanguageModel` port.
|
||||
For more information, see [Language Model output types](#language-model-output-types).
|
||||
|
||||
4. Connect the bundled component's output to the **Language Model** component's `LanguageModel` input port.
|
||||
2. Add an **Agent** component to the flow, and then set **Model Provider** to **Custom**.
|
||||
The **Model Provider** field changes to a **Language Model** field with a `LanguageModel` port.
|
||||
|
||||
The bundled component now provides the LLM configuration for the component that it is connected to, and you can continue building your flow as needed.
|
||||
4. Connect the LLM component's output to the **Agent** component's **Language Model** input.
|
||||
The **Agent** component now inherits the LLM settings from the connected LLM component instead of using any of the built-in models.
|
||||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
|
@ -54,7 +54,34 @@ For more information about the **File** component and other data loading compone
|
|||
### Load files at runtime
|
||||
|
||||
You can use preloaded files in your flows, and you can load files at runtime, if your flow accepts file input.
|
||||
For an example, see [Create a chatbot that can ingest files](/chat-with-files).
|
||||
To enable file input in your flow, do the following:
|
||||
1. Add a [**File** component](/components-data#file) to your flow.
|
||||
2. Click **Share**, select **API access**, and then click **Input Schema** to add [`tweaks`](/concepts-publish#input-schema) to the request payload in the flow's automatically generated code snippets.
|
||||
3. Expand the **File** section, find the **Files** row, and then enable **Expose Input** to allow the parameter to be set at runtime through the Langflow API.
|
||||
4. Close the **Input Schema** pane to return to the **API access** pane.
|
||||
The payload in each code snippet now includes `tweaks`, your **File** component's ID, and the `path` key that you enabled in **Input Schema**:
|
||||
|
||||
```json
|
||||
"tweaks": {
|
||||
"File-qYD5w": {
|
||||
"path": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
5. When you run this flow programmatically, your script must upload a file to Langflow file management, and then pass the returned `file_path` to the `path` tweak in the `/run` request:
|
||||
|
||||
```json
|
||||
"tweaks": {
|
||||
"FILE_COMPONENT_ID": {
|
||||
"path": [ "file_path" ]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For a complete example, see [Create a chatbot that can ingest files](/chat-with-files) and [Files endpoints](/api-files).
|
||||
|
||||
If you want to upload multiple files, you can pass multiple `file_path` values in the `path` array, such as `[ "path1", "path2" ]`.
|
||||
|
||||
## Upload images
|
||||
|
||||
|
|
|
|||
|
|
@ -29,14 +29,7 @@ For flows that require another type of input, such as a webhook event, file uplo
|
|||
|
||||

|
||||
|
||||
<details>
|
||||
<summary>Playground mechanics</summary>
|
||||
|
||||
When you run a flow in the **Playground**, Langflow calls the `/build/$FLOW_ID/flow` endpoint in [chat.py](https://github.com/langflow-ai/langflow/blob/main/src/backend/base/langflow/api/v1/chat.py#L143). This call retrieves the flow data, builds a graph, and executes the graph. As each component (or node) is executed, the `build_vertex` function calls `build_and_run`, which may call the individual components' `def_build` method, if it exists. If a component doesn't have a `def_build` function, the build still returns a component.
|
||||
|
||||
The `build` function allows components to execute logic at runtime. For example, the [**Recursive Character Text Splitter** component](https://github.com/langflow-ai/langflow/blob/main/src/backend/base/langflow/components/langchain_utilities/recursive_character.py) is a child of the `LCTextSplitterComponent` class. When text needs to be processed, the parent class's `build` method is called, which creates a `RecursiveCharacterTextSplitter` object and uses it to split the text according to the defined parameters. The split text is then passed on to the next component. This all occurs when the component is built.
|
||||
|
||||
</details>
|
||||
For technical details about how the **Playground** works, see [Monitor endpoints](/api-monitor).
|
||||
|
||||
### Review agent logic
|
||||
|
||||
|
|
|
|||
|
|
@ -39,39 +39,33 @@ The schema is defined in [`data.py`](https://github.com/langflow-ai/langflow/blo
|
|||
|
||||
The following attributes are available:
|
||||
|
||||
- `data`: A dictionary that stores key-value pairs.
|
||||
- `data`: A `Data` object stores key-value pairs within the `.data` attribute. This is the `Data` object's core dictionary. Each key is a field name, and the values can be any supported data type.
|
||||
- `text_key`: The key in `data` that is considered the primary text value.
|
||||
- `default_value`: Fallback if `text_key` is missing. The default `text_key` is `"text"`.
|
||||
|
||||
### Data structure
|
||||
|
||||
A `Data` object stores key-value pairs within the `.data` attribute, where each key is a field name and its value can be any supported data type. `text_key` tells Langflow which key in the data dictionary is the primary text value for that object.
|
||||
|
||||
```python
|
||||
data_obj = Data(
|
||||
text_key="text", # Field 1
|
||||
data={ # Field 2 (the actual dict)
|
||||
text_key="text",
|
||||
data={
|
||||
"text": "Hello world",
|
||||
"name": "Charlie",
|
||||
"age": 28
|
||||
},
|
||||
default_value="" # Field 3
|
||||
default_value=""
|
||||
)
|
||||
```
|
||||
|
||||
`Data` objects can be serialized to JSON, created from JSON, or created from other dictionary data.
|
||||
However, the resulting `Data` object is a structured object with validation and methods, not a plain dictionary.
|
||||
|
||||
For example, when serialized into JSON, the previous example becomes the following JSON object:
|
||||
For example, when serialized into JSON, the previous Python example becomes the following JSON object:
|
||||
|
||||
```json
|
||||
{
|
||||
"text_key": "text",
|
||||
"data": {
|
||||
"text": "User Profile",
|
||||
"name": "Charlie Lastname",
|
||||
"age": 28,
|
||||
"email": "charlie.lastname@example.com"
|
||||
"text": "Hello world",
|
||||
"name": "Charlie",
|
||||
"age": 28
|
||||
},
|
||||
"default_value": ""
|
||||
}
|
||||
|
|
@ -263,7 +257,7 @@ Hover over the port to see the accepted or produced data types.
|
|||
In Langflow, you can use <Icon name="TextSearch" aria-hidden="True" /> **Inspect output** to view the output of individual components.
|
||||
This can help you learn about the different data types and debug problems with invalid or malformed inputs and outputs.
|
||||
|
||||
The following example shows how to inspect the output of a **Type Convert** component, which can convert `Message`, `Data`, or `DataFrame` input into `Message`, `Data`, or `DataFrame` output:
|
||||
The following example shows how to inspect the output of a [**Type Convert** component](/components-processing#type-convert), which can convert data from one type to another:
|
||||
|
||||
1. Create a flow, and then connect a **Chat Input** component to a **Type Convert** component.
|
||||
|
||||
|
|
@ -344,6 +338,7 @@ The following example shows how to inspect the output of a **Type Convert** comp
|
|||
|
||||
## See also
|
||||
|
||||
- [**Processing** components](/components-processing)
|
||||
- [Custom components](/components-custom-components)
|
||||
- [Pydantic Models](https://docs.pydantic.dev/latest/api/base_model/)
|
||||
- [pandas.DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)
|
||||
|
|
@ -317,7 +317,7 @@ Additionally, you must sign in as a superuser to manage users and [create a Lang
|
|||
uv run langflow run --env-file .env
|
||||
```
|
||||
|
||||
Starting Langflow with an `.env` file automatically authenticates you as the superuser set in `LANGFLOW_SUPERUSER` and `LANGFLOW_SUPERUSER_PASSWORD`.
|
||||
Starting Langflow with a `.env` file automatically authenticates you as the superuser set in `LANGFLOW_SUPERUSER` and `LANGFLOW_SUPERUSER_PASSWORD`.
|
||||
If you don't explicitly set these variables, the default values are `langflow` and `langflow` for system auto-login.
|
||||
|
||||
6. Verify the server is running. The default location is `http://localhost:7860`.
|
||||
|
|
|
|||
|
|
@ -230,7 +230,7 @@ Use this mode to previews the changes that would be made to the database schema
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### langflow run
|
||||
### langflow run {#langflow-run}
|
||||
|
||||
Starts the Langflow server.
|
||||
|
||||
|
|
|
|||
|
|
@ -16,12 +16,16 @@ You can set Langflow environment variables in your terminal, in `.env`, and with
|
|||
|
||||
If an environment variable is set in multiple places, the following hierarchy applies:
|
||||
|
||||
1. Langflow CLI options override `.env` and terminal variables.
|
||||
2. `.env` overrides terminal variables.
|
||||
3. Terminal variables are used only if the variable isn't set in `.env` or Langflow CLI options.
|
||||
1. Langflow CLI options override all other sources.
|
||||
2. The `.env` file overrides system environment variables.
|
||||
3. System environment variables are used only if not set elsewhere.
|
||||
|
||||
When running a Langflow Docker image, the `-e` flag sets system environment variables.
|
||||
|
||||
For example:
|
||||
- If you set `LANGFLOW_PORT=8080` in your system environment and `LANGFLOW_PORT=7860` in `.env`, Langflow uses `7860` from `.env`.
|
||||
- If you run `langflow run --port 9000` with `LANGFLOW_PORT=7860` in `.env`, Langflow uses `9000` from the CLI option.
|
||||
|
||||
For example, if you set `LANGFLOW_PORT` in `.env` and your terminal, then Langflow uses the value from `.env`.
|
||||
Similarly, if you run a Langflow CLI command with `--port`, Langflow uses that port number instead of the `LANGFLOW_PORT` in `.env`.
|
||||
|
||||
## Configure environment variables
|
||||
|
||||
|
|
@ -151,7 +155,7 @@ The following table lists the environment variables supported by Langflow.
|
|||
| `LANGFLOW_AUTO_SAVING_INTERVAL` | Integer | `1000` | Set the interval for flow auto-saving in milliseconds. |
|
||||
| `LANGFLOW_BACKEND_ONLY` | Boolean | False | Run only the Langflow backend service (no frontend). |
|
||||
| `LANGFLOW_BUNDLE_URLS` | List[String] | `[]` | A list of URLs from which to load component bundles and flows. Supports GitHub URLs. If LANGFLOW_AUTO_LOGIN is enabled, flows from these bundles are loaded into the database. |
|
||||
| `LANGFLOW_CACHE_TYPE` | String | `async` | Set the cache type for Langflow. Possible values: `async`, `redis`, `memory`, `disk`. If you set the type to `redis`, then you must also set the following environment variables: `LANGFLOW_REDIS_HOST`, `LANGFLOW_REDIS_PORT`, `LANGFLOW_REDIS_DB`, and `LANGFLOW_REDIS_CACHE_EXPIRE`. |
|
||||
| `LANGFLOW_CACHE_TYPE` | String | `async` | Set the cache type for Langflow. Possible values: `async`, `redis`, `memory`, `disk`. If you set the type to `redis`, then you must also set the following environment variables: `LANGFLOW_REDIS_HOST`, `LANGFLOW_REDIS_PORT`, `LANGFLOW_REDIS_DB`, and `LANGFLOW_REDIS_CACHE_EXPIRE`. See also [`langflow run`](/configuration-cli#langflow-run). |
|
||||
| `LANGFLOW_COMPONENTS_PATH` | String | Not set | Path to the directory containing custom components. |
|
||||
| `LANGFLOW_CONFIG_DIR` | String | Varies | Set the Langflow configuration directory where files, logs, and the Langflow database are stored. Default path depends on your installation. See [Flow storage and logs](/concepts-flows#flow-storage-and-logs). |
|
||||
| `LANGFLOW_DATABASE_URL` | String | Not set | Set the database URL for Langflow. If not provided, Langflow uses a SQLite database. |
|
||||
|
|
@ -163,14 +167,14 @@ The following table lists the environment variables supported by Langflow.
|
|||
| `LANGFLOW_DISABLE_TRACK_APIKEY_USAGE` | Boolean | False | Whether to track API key usage. If true, disables tracking of API key usage (`total_uses` and `last_used_at`) to avoid database contention under high concurrency. |
|
||||
| `LANGFLOW_ENABLE_SUPERUSER_CLI` | Boolean | True | Allow creation of superusers with the Langflow CLI command [`langflow superuser`](./configuration-cli.mdx#langflow-superuser). Recommended to be disabled (false) in production for security reasons. |
|
||||
| `LANGFLOW_FALLBACK_TO_ENV_VAR` | Boolean | True | If enabled, [global variables](/configuration-global-variables) set in your Langflow **Settings** can use an environment variable with the same name if Langflow can't retrieve the variable value from the global variables. |
|
||||
| `LANGFLOW_FRONTEND_PATH` | String | `./frontend` | Path to the frontend directory containing build files. This is for development purposes only. See [`--frontend-path`](./configuration-cli.mdx#run-frontend-path). |
|
||||
| `LANGFLOW_HEALTH_CHECK_MAX_RETRIES` | Integer | `5` | Set the maximum number of retries for the health check. See [`--health-check-max-retries`](./configuration-cli.mdx#run-health-check-max-retries). |
|
||||
| `LANGFLOW_HOST` | String | `localhost` | The host on which the Langflow server will run. See [`--host`](./configuration-cli.mdx#run-host). |
|
||||
| `LANGFLOW_LANGCHAIN_CACHE` | String | `InMemoryCache` | Type of cache to use. Possible values: `InMemoryCache`, `SQLiteCache`. See [`--cache`](./configuration-cli.mdx#run-cache). |
|
||||
| `LANGFLOW_FRONTEND_PATH` | String | `./frontend` | Path to the frontend directory containing build files. This is for development purposes only. See [`langflow run`](/configuration-cli#langflow-run). |
|
||||
| `LANGFLOW_HEALTH_CHECK_MAX_RETRIES` | Integer | `5` | Set the maximum number of retries for the health check. See [`langflow run`](/configuration-cli#langflow-run). |
|
||||
| `LANGFLOW_HOST` | String | `localhost` | The host on which the Langflow server will run. See [`langflow run`](/configuration-cli#langflow-run). |
|
||||
| `LANGFLOW_LANGCHAIN_CACHE` | String | `InMemoryCache` | Type of cache storage to use, separate from `LANGFLOW_CACHE_TYPE`. Possible values: `InMemoryCache`, `SQLiteCache`. |
|
||||
| `LANGFLOW_LOG_LEVEL` | String | `INFO` | Set the logging level for Langflow. Possible values: `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`. |
|
||||
| `LANGFLOW_LOG_FILE` | String | Not set | Path to the log file. If this option isn't set, logs are written to stdout. |
|
||||
| `LANGFLOW_LOG_RETRIEVER_BUFFER_SIZE` | Integer | `10000` | Set the buffer size for log retrieval. Only used if `LANGFLOW_ENABLE_LOG_RETRIEVAL` is enabled. |
|
||||
| `LANGFLOW_MAX_FILE_SIZE_UPLOAD` | Integer | `100` | Set the maximum file size for the upload in megabytes. See [`--max-file-size-upload`](./configuration-cli.mdx#run-max-file-size-upload). |
|
||||
| `LANGFLOW_MAX_FILE_SIZE_UPLOAD` | Integer | `100` | Set the maximum file size for the upload in megabytes. See [`langflow run`](/configuration-cli#langflow-run). |
|
||||
| `LANGFLOW_MAX_ITEMS_LENGTH` | Integer | `100` | Maximum number of items to store and display in the visual editor. Lists longer than this will be truncated when displayed in the visual editor. Doesn't affect data passed between components nor outputs. |
|
||||
| `LANGFLOW_MAX_TEXT_LENGTH` | Integer | `1000` | Maximum number of characters to store and display in the visual editor. Responses longer than this will be truncated when displayed in the visual editor. Doesn't truncate responses between components nor outputs. |
|
||||
| `LANGFLOW_MCP_SERVER_ENABLED` | Boolean | True | If this option is set to False, Langflow doesn't enable the MCP server. |
|
||||
|
|
|
|||
|
|
@ -4,6 +4,8 @@ slug: /logging
|
|||
---
|
||||
|
||||
import Icon from "@site/src/components/icon";
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
This page provides information about Langflow logs, including logs for individual flows and the Langflow application itself.
|
||||
|
||||
|
|
@ -94,6 +96,45 @@ When debugging issues with the format or content of a flow's output, it can help
|
|||
|
||||
To view the output produced by a single component during the most recent run, click <Icon name="TextSearch" aria-hidden="true"/> **Inspect output** on the component in the visual editor.
|
||||
|
||||
## Access Langflow Desktop logs {#desktop-logs}
|
||||
|
||||
If you encounter issues with Langflow Desktop, you might need to access startup logs for debugging.
|
||||
Follow the steps for your operating system.
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="macos" label="macOS">
|
||||
|
||||
1. Open Terminal and run:
|
||||
```bash
|
||||
cd ~/Library/Logs/com.Langflow
|
||||
```
|
||||
|
||||
2. To open the folder and view the log files, run the command:
|
||||
```bash
|
||||
open .
|
||||
```
|
||||
|
||||
3. Locate the `langflow.log` file.
|
||||
</TabItem>
|
||||
<TabItem value="windows" label="Windows">
|
||||
|
||||
1. Open the Command Prompt (CMD), and then run the following command:
|
||||
```cmd
|
||||
cd %LOCALAPPDATA%\com.langflow\logs
|
||||
```
|
||||
|
||||
2. To open the folder and view the log files, run the command:
|
||||
```cmd
|
||||
start .
|
||||
```
|
||||
|
||||
3. Locate the `langflow.log` file.
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
You can use the log file to investigate the issue on your own, add context to a [GitHub Issue](/contributing-github-issues), or send it to [support](/luna-for-langflow) for debugging assistance.
|
||||
|
||||
The log file is only created when Langflow Desktop runs. If you don't see a log file, try starting Langflow Desktop first, then check for the log file.
|
||||
## See also
|
||||
|
||||
* [Memory management options](/memory)
|
||||
|
|
|
|||
|
|
@ -26,6 +26,8 @@ However, some features aren't available for Langflow Desktop, such as the **Shar
|
|||
<Tabs>
|
||||
<TabItem value="macos" label="macOS" default>
|
||||
|
||||
Langflow Desktop requires macOS 13 or later.
|
||||
|
||||
1. Navigate to [Langflow Desktop](https://www.langflow.org/desktop).
|
||||
2. Click **Download Langflow**, enter your contact information, and then click **Download**.
|
||||
3. Mount and install the Langflow application.
|
||||
|
|
|
|||
|
|
@ -78,7 +78,7 @@ For more information, see the [NV-Ingest documentation](https://nvidia.github.io
|
|||
| extract_infographics | Extract Infographics | Extract infographics from document. Default: false. |
|
||||
| text_depth | Text Depth | The level at which text is extracted. Options: 'document', 'page', 'block', 'line', 'span'. Default: `page`. |
|
||||
| split_text | Split Text | Split text into smaller chunks. Default: true. |
|
||||
| chunk_size | Chunk Size | The number of tokens per chunk. Default: `500`. |
|
||||
| chunk_size | Chunk Size | The number of tokens per chunk. Default: `500`. Make sure the chunk size is compatible with your embedding model. For more information, see [Tokenization errors due to chunk size](/components-processing#chunk-size). |
|
||||
| chunk_overlap | Chunk Overlap | Number of tokens to overlap from previous chunk. Default: `150`. |
|
||||
| filter_images | Filter Images | Filter images (see advanced options for filtering criteria). Default: false. |
|
||||
| min_image_size | Minimum Image Size Filter | Minimum image width/length in pixels. Default: `128`. |
|
||||
|
|
|
|||
|
|
@ -1,20 +1,14 @@
|
|||
---
|
||||
title: Enterprise support for Langflow
|
||||
title: IBM Elite Support for Langflow
|
||||
slug: /luna-for-langflow
|
||||
---
|
||||
|
||||
With **Luna for Langflow** support, you can develop and deploy Langflow applications with confidence.
|
||||
IBM Elite Support offers enterprise support for Langflow to help you develop and deploy Langflow applications with confidence.
|
||||
|
||||
Luna is a subscription to the Langflow expertise at DataStax. It's meant for Langflow users who want all the benefits of running their own open-source deployments, as well as the peace of mind that comes with having direct access to the team that has authored the majority of the Langflow code.
|
||||
IBM Elite Support for Langflow is a subscription to the Langflow expertise at IBM.
|
||||
It's meant for Langflow users who want all the benefits of running their own open-source deployments, as well as the peace of mind that comes with having direct access to the team that has authored the majority of the Langflow code.
|
||||
|
||||
Luna subscribers can get help with general-purpose and technical questions for their open-source Langflow deployments.
|
||||
If an issue is encountered, DataStax is there to help.
|
||||
IBM Elite Support subscribers can get help with general-purpose and technical questions for their open-source Langflow deployments.
|
||||
If an issue is encountered, IBM is there to help.
|
||||
|
||||
:::info
|
||||
As of May 2025, Luna for Langflow support covers Langflow versions 1.4.x.
|
||||
|
||||
Subscribers must run a supported Python version to receive support.
|
||||
Supported versions are `>=3.10, <3.14`, which includes all versions from 3.10 through 3.13.x, but not 3.14.
|
||||
:::
|
||||
|
||||
To subscribe or learn more, see [Luna for Langflow](https://www.datastax.com/products/luna-langflow).
|
||||
To subscribe or learn more, see [IBM Elite Support for Langflow](https://ibm.com/docs/esfl?topic=elite-support-langflow-specification).
|
||||
|
|
@ -61,7 +61,7 @@ For all changes, see the [Changelog](https://github.com/langflow-ai/langflow/rel
|
|||
The [**Language Model** component](/components-models) and [**Embedding Model** component](/components-embedding-models) are now core components for your LLM and embeddings flows. They support multiple models and model providers, and allow you to experiment with different models without swapping out single-provider components.
|
||||
Find them in the **Components** menu in the **Models** category.
|
||||
|
||||
The single-provider components are still available for your flows in the **Components** menu in the [**Bundles**](/components-bundle-components) section, and you can connect them to the **Language Model** and **Embedding Model** components with the **Custom** provider option.
|
||||
The single-provider components are still available for your flows in the **Components** menu in the [**Bundles**](/components-bundle-components) section, and you can use them to replace the **Language Model** and **Embedding Model** core components, or connect them to the **Agent** component with the **Custom** provider option.
|
||||
|
||||
- MCP server one-click installation
|
||||
|
||||
|
|
@ -107,7 +107,7 @@ For all changes, see the [Changelog](https://github.com/langflow-ai/langflow/rel
|
|||
- Enhanced file and flow management system with improved bulk capabilities.
|
||||
- Added the **BigQuery** component
|
||||
- Added the **Twelve Labs** bundle
|
||||
- Added the **NVIDIA System Assistant** component
|
||||
- Added the **NVIDIA G-Assist** component
|
||||
|
||||
### Deprecations
|
||||
|
||||
|
|
|
|||
|
|
@ -124,6 +124,21 @@ There are two possible reasons for this error:
|
|||
Environment variables set in your terminal aren't automatically available to GUI-based applications like Langflow Desktop when launched through the Finder or the Start Menu.
|
||||
To set environment variables for Langflow Desktop, see [Set environment variables for Langflow Desktop](/environment-variables#set-environment-variables-for-langflow-desktop).
|
||||
|
||||
### Access Langflow Desktop startup logs
|
||||
|
||||
If you encounter issues with Langflow Desktop, you might need to [access Langflow Desktop startup logs](/logging#desktop-logs) for debugging.
|
||||
|
||||
### User not found or inactive when running multiple flows
|
||||
|
||||
When running multiple local Langflow OSS instances on different ports, such as `localhost:7860` and `localhost:7861`, you might see authentication errors in the logs.
|
||||
For example:
|
||||
|
||||
```text
|
||||
[07/22/25 10:57:07] INFO 2025-07-22 10:57:07 - INFO - utils - User not found or inactive.
|
||||
```
|
||||
|
||||
To resolve this error, use separate browser instances or browser profiles to access each Langflow instance.
|
||||
|
||||
### Package is not installed
|
||||
|
||||
In Langflow OSS, you can follow the error message's instructions to install the missing dependency.
|
||||
|
|
@ -146,13 +161,22 @@ The following error can occur during Langflow upgrades when the new version can'
|
|||
|
||||
To resolve this error, clear the cache by deleting the contents of your Langflow cache folder.
|
||||
The filepath depends on your operating system, installation type, and configuration options.
|
||||
For more information and default filepaths, see [Memory management options](/memory#flow-storage-and-logs).
|
||||
For more information and default filepaths, see [Memory management options](/memory).
|
||||
|
||||
:::important
|
||||
Clearing the cache erases your settings.
|
||||
If you want to retain your settings files, create a backup of those files before clearing the cache folder.
|
||||
:::
|
||||
|
||||
### Langflow Desktop says it is running the latest version, but it is actually behind
|
||||
|
||||
If you are running Langflow Desktop version 1.4.2 or earlier, the UI might incorrectly report that you are on the latest version when a newer version is available.
|
||||
|
||||
This happens because the automatic update feature in the UI was introduced in version 1.4.2.
|
||||
Earlier versions can't automatically detect or apply updates.
|
||||
|
||||
To resolve this issue, uninstall Langflow Desktop, and then [download and install the latest version of Langflow Desktop](https://langflow.org/desktop).
|
||||
|
||||
## Langflow uninstall issues
|
||||
|
||||
The following issues can occur when uninstalling Langflow.
|
||||
|
|
@ -174,12 +198,41 @@ To fully remove a Langflow Desktop macOS installation, you must also delete `~/.
|
|||
- [Use MCP Inspector to test and debug flows](/mcp-server#test-and-debug-flows)
|
||||
- [Troubleshooting MCP server](/mcp-server#troubleshooting-mcp-server)
|
||||
|
||||
## Token length limit errors in Embedding Model components
|
||||
|
||||
Token length errors can happen if your chunking strategy doesn't align with your embedding model's tokenization limits.
|
||||
For more information, see [Tokenization errors due to chunk size](/components-processing#chunk-size).
|
||||
|
||||
## Custom components and integrations issues
|
||||
|
||||
For troubleshooting advice for a third-party integration, see the information about that integration in the Langflow documentation and the provider's documentation.
|
||||
|
||||
If you are building a custom component, see [Error handling and logging for custom Python components](/components-custom-components#error-handling-and-logging).
|
||||
|
||||
### Custom components not appearing in the visual editor
|
||||
|
||||
If your custom components are not appearing in the Langflow visual editor, try the following troubleshooting steps:
|
||||
|
||||
1. Ensure your components follow the [required directory structure](https://docs.langflow.org/components-custom-components#directory-structure-requirements).
|
||||
```
|
||||
/your/custom/components/path/ # Base directory set by LANGFLOW_COMPONENTS_PATH
|
||||
└── category_name/ # Required category subfolder that determines menu name
|
||||
├── __init__.py # Required
|
||||
└── custom_component.py # Component file
|
||||
```
|
||||
|
||||
2. Verify each category directory includes an `__init__.py` file.
|
||||
This is required for Python to recognize the directory as a module.
|
||||
|
||||
3. Use the command line argument instead of the environment variable for `LANGFLOW_COMPONENTS_PATH`.
|
||||
If you're using the `LANGFLOW_COMPONENTS_PATH` environment variable and components aren't loading, try the `--components-path` command line argument instead:
|
||||
|
||||
```bash
|
||||
uv run langflow run --components-path /path/to/your/custom/components
|
||||
```
|
||||
|
||||
If you continue to experience issues, please [report them on GitHub](https://github.com/langflow-ai/langflow/issues) with details about your directory structure and component setup.
|
||||
|
||||
## See also
|
||||
|
||||
- [Langflow GitHub Issues and Discussions](/contributing-github-issues)
|
||||
|
|
|
|||
|
|
@ -31,8 +31,10 @@ The following steps modify the **Basic Prompting** template to accept file input
|
|||
2. In the **Language Model** component, enter your OpenAI API key.
|
||||
|
||||
If you want to use a different provider or model, edit the **Model Provider**, **Model Name**, and **API Key** fields accordingly.
|
||||
|
||||
3. To verify that your API key is valid, click <Icon name="Play" aria-hidden="true" /> **Playground**, and then ask the LLM a question.
|
||||
The LLM should respond according to the specifications in the **Prompt Template** component's **Template** field.
|
||||
|
||||
4. Exit the **Playground**, and then modify the **Prompt Template** component to accept file input in addition to chat input.
|
||||
To do this, edit the **Template** field, and then replace the default prompt with the following text:
|
||||
|
||||
|
|
|
|||
|
|
@ -400,13 +400,6 @@ const config = {
|
|||
},
|
||||
},
|
||||
footer: {
|
||||
logo: {
|
||||
alt: "Langflow",
|
||||
src: "img/lf-docs-light.svg",
|
||||
srcDark: "img/lf-docs-dark.svg",
|
||||
width: 160,
|
||||
height: 40,
|
||||
},
|
||||
links: [
|
||||
{
|
||||
title: null,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
"openapi": "3.1.0",
|
||||
"info": {
|
||||
"title": "Langflow",
|
||||
"version": "1.5.0.post1"
|
||||
"version": "1.5.0.post2"
|
||||
},
|
||||
"paths": {
|
||||
"/api/v1/build/{flow_id}/vertices": {
|
||||
|
|
|
|||
|
|
@ -471,7 +471,7 @@ module.exports = {
|
|||
{
|
||||
type: "doc",
|
||||
id: "Support/luna-for-langflow",
|
||||
label: "Enterprise support",
|
||||
label: "IBM Elite Support for Langflow",
|
||||
},
|
||||
],
|
||||
},
|
||||
|
|
|
|||
|
|
@ -24,8 +24,8 @@ export default function FooterWrapper(props) {
|
|||
onMouseLeave={() => setIsHovered(false)}
|
||||
style={{
|
||||
position: 'fixed',
|
||||
right: '20px',
|
||||
bottom: '20px',
|
||||
right: '21px',
|
||||
bottom: '21px',
|
||||
zIndex: 100,
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
|
|
|
|||
BIN
docs/static/img/conditional-looping.png
vendored
Normal file
BIN
docs/static/img/conditional-looping.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 190 KiB |
|
|
@ -1,6 +1,6 @@
|
|||
[project]
|
||||
name = "langflow"
|
||||
version = "1.5.0.post1"
|
||||
version = "1.6.0"
|
||||
description = "A Python package with a built-in web application"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
license = "MIT"
|
||||
|
|
@ -31,7 +31,7 @@ dependencies = [
|
|||
"faiss-cpu==1.9.0.post1",
|
||||
"types-cachetools==5.5.0.20240820",
|
||||
"pymongo==4.10.1",
|
||||
"supabase==2.6.0",
|
||||
"supabase>=2.6.0,<3.0.0",
|
||||
"certifi>=2023.11.17,<2025.0.0",
|
||||
"certifi==2024.8.30",
|
||||
'fastavro==1.9.7; python_version < "3.13"',
|
||||
|
|
@ -52,7 +52,7 @@ dependencies = [
|
|||
"dspy-ai==2.5.41",
|
||||
"datasets>2.14.7",
|
||||
"assemblyai==0.35.1",
|
||||
"litellm==1.60.2",
|
||||
"litellm>=1.60.2,<2.0.0",
|
||||
"chromadb==0.5.23",
|
||||
"zep-python==2.0.2",
|
||||
"youtube-transcript-api==0.6.3",
|
||||
|
|
@ -61,13 +61,13 @@ dependencies = [
|
|||
"GitPython==3.1.43",
|
||||
"kubernetes==31.0.0",
|
||||
"json_repair==0.30.3",
|
||||
"langwatch==0.1.16",
|
||||
"langwatch>=0.2.11,<0.3.0",
|
||||
"langsmith>=0.3.42,<1.0.0",
|
||||
"yfinance==0.2.50",
|
||||
"wolframalpha==5.1.3",
|
||||
"astra-assistants[tools]~=2.2.12",
|
||||
"composio-langchain==0.7.15",
|
||||
"composio-core==0.7.15",
|
||||
"astra-assistants[tools]>=2.2.13,<3.0.0",
|
||||
"composio==0.8.5",
|
||||
"composio-langchain==0.8.5",
|
||||
"spider-client==0.1.24",
|
||||
"nltk==3.9.1",
|
||||
"lark==1.2.2",
|
||||
|
|
@ -77,10 +77,11 @@ dependencies = [
|
|||
"opensearch-py==2.8.0",
|
||||
"langchain-google-genai==2.0.6",
|
||||
"langchain-cohere==0.3.3",
|
||||
"langchain-huggingface==0.3.1",
|
||||
"langchain-anthropic==0.3.14",
|
||||
"langchain-astradb~=0.6.0",
|
||||
"langchain-openai>=0.2.12",
|
||||
"langchain-google-vertexai==2.0.7",
|
||||
"langchain-google-vertexai>=2.0.7,<3.0.0",
|
||||
"langchain-groq==0.2.1",
|
||||
"langchain-pinecone>=0.2.8",
|
||||
"langchain-mistralai==0.2.3",
|
||||
|
|
@ -112,8 +113,6 @@ dependencies = [
|
|||
"pydantic-ai>=0.0.19",
|
||||
"smolagents>=1.8.0",
|
||||
"apify-client>=1.8.1",
|
||||
"pylint>=3.3.4",
|
||||
"ruff>=0.9.7",
|
||||
"langchain-graph-retriever==0.6.1",
|
||||
"graph-retriever==0.6.1",
|
||||
"ibm-watsonx-ai>=1.3.1",
|
||||
|
|
@ -126,6 +125,10 @@ dependencies = [
|
|||
"docling_core>=2.36.1",
|
||||
"filelock>=3.18.0",
|
||||
"jigsawstack==0.2.7",
|
||||
"structlog>=25.4.0",
|
||||
"aiosqlite==0.21.0",
|
||||
"fastparquet>=2024.11.0",
|
||||
"traceloop-sdk>=0.43.1",
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
|
|
@ -134,8 +137,8 @@ dev = [
|
|||
"types-redis>=4.6.0.5",
|
||||
"ipykernel>=6.29.0",
|
||||
"mypy>=1.11.0",
|
||||
"ruff>=0.9.7,<0.10",
|
||||
"httpx>=0.27.0",
|
||||
"ruff>=0.12.7",
|
||||
"httpx>=0.28.1",
|
||||
"pytest>=8.2.0",
|
||||
"types-requests>=2.32.0",
|
||||
"requests>=2.32.0",
|
||||
|
|
@ -176,6 +179,7 @@ dev = [
|
|||
"pytest-timeout>=2.3.1",
|
||||
"pyyaml>=6.0.2",
|
||||
"pyleak>=0.1.14",
|
||||
"docling>=2.36.1"
|
||||
]
|
||||
|
||||
[tool.uv.sources]
|
||||
|
|
@ -195,6 +199,9 @@ Documentation = "https://docs.langflow.org"
|
|||
[project.optional-dependencies]
|
||||
docling = [
|
||||
"docling>=2.36.1",
|
||||
"tesserocr>=2.8.0",
|
||||
"rapidocr-onnxruntime>=1.4.4",
|
||||
"ocrmac>=1.0.0; sys_platform == 'darwin'",
|
||||
]
|
||||
|
||||
audio = [
|
||||
|
|
@ -217,8 +224,12 @@ clickhouse-connect = [
|
|||
]
|
||||
|
||||
nv-ingest = [
|
||||
"nv-ingest-api==2025.4.22.dev20250422",
|
||||
"nv-ingest-client==2025.4.22.dev20250422",
|
||||
# NOTE: These must be removed in order to run `uv lock --upgrade` or `uv sync --upgrade`
|
||||
# due to incompatibility with <3.12 and how uv handles lockfile creation.
|
||||
# If upgrading, ensure `uv lock` and `uv sync` are run after upgrade with these,
|
||||
# and that afterward, the lockfile contains nv-ingest.
|
||||
"nv-ingest-api==25.6.2,<26.0.0 ; python_version >= '3.12'",
|
||||
"nv-ingest-client==25.6.3,<26.0.0 ; python_version >= '3.12'",
|
||||
]
|
||||
|
||||
postgresql = [
|
||||
|
|
@ -254,6 +265,7 @@ log_cli_date_format = "%Y-%m-%d %H:%M:%S"
|
|||
markers = ["async_test", "api_key_required", "no_blockbuster", "benchmark"]
|
||||
asyncio_mode = "auto"
|
||||
asyncio_default_fixture_loop_scope = "function"
|
||||
addopts = "-p no:benchmark"
|
||||
|
||||
[tool.coverage.run]
|
||||
command_line = """
|
||||
|
|
@ -296,7 +308,9 @@ ignore = [
|
|||
"TD002", # Missing author in TODO
|
||||
"TD003", # Missing issue link in TODO
|
||||
"TRY301", # A bit too harsh (Abstract `raise` to an inner function)
|
||||
|
||||
"PLC0415", # Inline imports
|
||||
"D10", # Missing docstrings
|
||||
"PLW1641", # Object does not implement `__hash__` method (mutable objects shouldn't be hashable)
|
||||
# Rules that are TODOs
|
||||
"ANN",
|
||||
]
|
||||
|
|
@ -306,6 +320,7 @@ external = ["RUF027"]
|
|||
|
||||
[tool.ruff.lint.per-file-ignores]
|
||||
"scripts/*" = ["D1", "INP", "T201"]
|
||||
"src/backend/base/langflow/alembic/versions/*" = ["INP001", "D415", "PGH003"]
|
||||
"src/backend/tests/*" = [
|
||||
"D1",
|
||||
"PLR2004",
|
||||
|
|
|
|||
|
|
@ -1,14 +1,14 @@
|
|||
@echo off
|
||||
echo Starting Langflow build and run process...
|
||||
|
||||
REM Check if .env file exists and set env file parameter
|
||||
set "ENV_FILE_PARAM="
|
||||
REM Check if .env file exists and set env file flag
|
||||
set "USE_ENV_FILE="
|
||||
REM Get the script directory and resolve project root
|
||||
for %%I in ("%~dp0..\..") do set "PROJECT_ROOT=%%~fI"
|
||||
set "ENV_PATH=%PROJECT_ROOT%\.env"
|
||||
if exist "%ENV_PATH%" (
|
||||
echo Found .env file at: %ENV_PATH%
|
||||
set "ENV_FILE_PARAM=--env-file \"%ENV_PATH%\""
|
||||
set "USE_ENV_FILE=1"
|
||||
) else (
|
||||
echo .env file not found at: %ENV_PATH%
|
||||
echo Langflow will use default configuration
|
||||
|
|
@ -85,8 +85,8 @@ echo Step 4: Running Langflow...
|
|||
echo.
|
||||
echo Attention: Wait until uvicorn is running before opening the browser
|
||||
echo.
|
||||
if defined ENV_FILE_PARAM (
|
||||
uv run langflow run %ENV_FILE_PARAM%
|
||||
if defined USE_ENV_FILE (
|
||||
uv run --env-file "%ENV_PATH%" langflow run
|
||||
) else (
|
||||
uv run langflow run
|
||||
)
|
||||
|
|
|
|||
|
|
@ -87,7 +87,7 @@ Write-Host "`nStep 4: Running Langflow..." -ForegroundColor Yellow
|
|||
Write-Host "`nAttention: Wait until uvicorn is running before opening the browser" -ForegroundColor Red
|
||||
try {
|
||||
if ($useEnvFile) {
|
||||
& uv run langflow run --env-file $envPath
|
||||
& uv run --env-file $envPath langflow run
|
||||
} else {
|
||||
& uv run langflow run
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,7 +15,9 @@ import click
|
|||
import httpx
|
||||
import typer
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import HTTPException
|
||||
from httpx import HTTPError
|
||||
from jose import JWTError
|
||||
from multiprocess import cpu_count
|
||||
from multiprocess.context import Process
|
||||
from packaging import version as pkg_version
|
||||
|
|
@ -29,9 +31,9 @@ from langflow.cli.progress import create_langflow_progress
|
|||
from langflow.initial_setup.setup import get_or_create_default_folder
|
||||
from langflow.logging.logger import configure, logger
|
||||
from langflow.main import setup_app
|
||||
from langflow.services.database.utils import session_getter
|
||||
from langflow.services.auth.utils import check_key, get_current_user_by_jwt
|
||||
from langflow.services.deps import get_db_service, get_settings_service, session_scope
|
||||
from langflow.services.settings.constants import DEFAULT_SUPERUSER
|
||||
from langflow.services.settings.constants import DEFAULT_SUPERUSER, DEFAULT_SUPERUSER_PASSWORD
|
||||
from langflow.services.utils import initialize_services
|
||||
from langflow.utils.version import fetch_latest_version, get_version_info
|
||||
from langflow.utils.version import is_pre_release as langflow_is_pre_release
|
||||
|
|
@ -160,7 +162,7 @@ def wait_for_server_ready(host, port, protocol) -> None:
|
|||
except HTTPError:
|
||||
time.sleep(1)
|
||||
except Exception: # noqa: BLE001
|
||||
logger.opt(exception=True).debug("Error while waiting for the server to become ready.")
|
||||
logger.debug("Error while waiting for the server to become ready.", exc_info=True)
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
|
|
@ -632,41 +634,138 @@ def print_banner(host: str, port: int, protocol: str) -> None:
|
|||
|
||||
@app.command()
|
||||
def superuser(
|
||||
username: str = typer.Option(..., prompt=True, help="Username for the superuser."),
|
||||
password: str = typer.Option(..., prompt=True, hide_input=True, help="Password for the superuser."),
|
||||
username: str = typer.Option(
|
||||
None, help="Username for the superuser. Defaults to 'langflow' when AUTO_LOGIN is enabled."
|
||||
),
|
||||
password: str = typer.Option(
|
||||
None, help="Password for the superuser. Defaults to 'langflow' when AUTO_LOGIN is enabled."
|
||||
),
|
||||
log_level: str = typer.Option("error", help="Logging level.", envvar="LANGFLOW_LOG_LEVEL"),
|
||||
auth_token: str = typer.Option(
|
||||
None, help="Authentication token of existing superuser.", envvar="LANGFLOW_SUPERUSER_TOKEN"
|
||||
),
|
||||
) -> None:
|
||||
"""Create a superuser."""
|
||||
"""Create a superuser.
|
||||
|
||||
When AUTO_LOGIN is enabled, uses default credentials.
|
||||
In production mode, requires authentication.
|
||||
"""
|
||||
configure(log_level=log_level)
|
||||
db_service = get_db_service()
|
||||
|
||||
async def _create_superuser():
|
||||
await initialize_services()
|
||||
async with session_getter(db_service) as session:
|
||||
from langflow.services.auth.utils import create_super_user
|
||||
asyncio.run(_create_superuser(username, password, auth_token))
|
||||
|
||||
if await create_super_user(db=session, username=username, password=password):
|
||||
# Verify that the superuser was created
|
||||
from langflow.services.database.models.user.model import User
|
||||
|
||||
stmt = select(User).where(User.username == username)
|
||||
user: User = (await session.exec(stmt)).first()
|
||||
if user is None or not user.is_superuser:
|
||||
typer.echo("Superuser creation failed.")
|
||||
return
|
||||
# Now create the first folder for the user
|
||||
result = await get_or_create_default_folder(session, user.id)
|
||||
if result:
|
||||
typer.echo("Default folder created successfully.")
|
||||
else:
|
||||
msg = "Could not create default folder."
|
||||
raise RuntimeError(msg)
|
||||
typer.echo("Superuser created successfully.")
|
||||
async def _create_superuser(username: str, password: str, auth_token: str | None):
|
||||
"""Create a superuser."""
|
||||
await initialize_services()
|
||||
|
||||
else:
|
||||
settings_service = get_settings_service()
|
||||
# Check if superuser creation via CLI is enabled
|
||||
if not settings_service.auth_settings.ENABLE_SUPERUSER_CLI:
|
||||
typer.echo("Error: Superuser creation via CLI is disabled.")
|
||||
typer.echo("Set LANGFLOW_ENABLE_SUPERUSER_CLI=true to enable this feature.")
|
||||
raise typer.Exit(1)
|
||||
|
||||
if settings_service.auth_settings.AUTO_LOGIN:
|
||||
# Force default credentials for AUTO_LOGIN mode
|
||||
username = DEFAULT_SUPERUSER
|
||||
password = DEFAULT_SUPERUSER_PASSWORD
|
||||
else:
|
||||
# Production mode - prompt for credentials if not provided
|
||||
if not username:
|
||||
username = typer.prompt("Username")
|
||||
if not password:
|
||||
password = typer.prompt("Password", hide_input=True)
|
||||
|
||||
from langflow.services.database.models.user.crud import get_all_superusers
|
||||
|
||||
existing_superusers = []
|
||||
async with session_scope() as session:
|
||||
# Note that the default superuser is created by the initialize_services() function,
|
||||
# but leaving this check here in case we change that behavior
|
||||
existing_superusers = await get_all_superusers(session)
|
||||
is_first_setup = len(existing_superusers) == 0
|
||||
|
||||
# If AUTO_LOGIN is true, only allow default superuser creation
|
||||
if settings_service.auth_settings.AUTO_LOGIN:
|
||||
if not is_first_setup:
|
||||
typer.echo("Error: Cannot create additional superusers when AUTO_LOGIN is enabled.")
|
||||
typer.echo("AUTO_LOGIN mode is for development with only the default superuser.")
|
||||
typer.echo("To create additional superusers:")
|
||||
typer.echo("1. Set LANGFLOW_AUTO_LOGIN=false")
|
||||
typer.echo("2. Run this command again with --auth-token")
|
||||
raise typer.Exit(1)
|
||||
|
||||
typer.echo(f"AUTO_LOGIN enabled. Creating default superuser '{username}'...")
|
||||
typer.echo(f"Note: Default credentials are {DEFAULT_SUPERUSER}/{DEFAULT_SUPERUSER_PASSWORD}")
|
||||
# AUTO_LOGIN is false - production mode
|
||||
elif is_first_setup:
|
||||
typer.echo("No superusers found. Creating first superuser...")
|
||||
else:
|
||||
# Authentication is required in production mode
|
||||
if not auth_token:
|
||||
typer.echo("Error: Creating a superuser requires authentication.")
|
||||
typer.echo("Please provide --auth-token with a valid superuser API key or JWT token.")
|
||||
typer.echo("To get a token, use: `uv run langflow api_key`")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Validate the auth token
|
||||
try:
|
||||
auth_user = None
|
||||
async with session_scope() as session:
|
||||
# Try JWT first
|
||||
user = None
|
||||
try:
|
||||
user = await get_current_user_by_jwt(auth_token, session)
|
||||
except (JWTError, HTTPException):
|
||||
# Try API key
|
||||
api_key_result = await check_key(session, auth_token)
|
||||
if api_key_result and hasattr(api_key_result, "is_superuser"):
|
||||
user = api_key_result
|
||||
auth_user = user
|
||||
|
||||
if not auth_user or not auth_user.is_superuser:
|
||||
typer.echo(
|
||||
"Error: Invalid token or insufficient privileges. Only superusers can create other superusers."
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
except typer.Exit:
|
||||
raise # Re-raise typer.Exit without wrapping
|
||||
except Exception as e: # noqa: BLE001
|
||||
typer.echo(f"Error: Authentication failed - {e!s}")
|
||||
raise typer.Exit(1) from None
|
||||
|
||||
# Auth complete, create the superuser
|
||||
async with session_scope() as session:
|
||||
from langflow.services.auth.utils import create_super_user
|
||||
|
||||
if await create_super_user(db=session, username=username, password=password):
|
||||
# Verify that the superuser was created
|
||||
from langflow.services.database.models.user.model import User
|
||||
|
||||
stmt = select(User).where(User.username == username)
|
||||
created_user: User = (await session.exec(stmt)).first()
|
||||
if created_user is None or not created_user.is_superuser:
|
||||
typer.echo("Superuser creation failed.")
|
||||
return
|
||||
# Now create the first folder for the user
|
||||
result = await get_or_create_default_folder(session, created_user.id)
|
||||
if result:
|
||||
typer.echo("Default folder created successfully.")
|
||||
else:
|
||||
msg = "Could not create default folder."
|
||||
raise RuntimeError(msg)
|
||||
|
||||
asyncio.run(_create_superuser())
|
||||
# Log the superuser creation for audit purposes
|
||||
logger.warning(
|
||||
f"SECURITY AUDIT: New superuser '{username}' created via CLI command"
|
||||
+ (" by authenticated user" if auth_token else " (first-time setup)")
|
||||
)
|
||||
typer.echo("Superuser created successfully.")
|
||||
|
||||
else:
|
||||
logger.error(f"SECURITY AUDIT: Failed attempt to create superuser '{username}' via CLI")
|
||||
typer.echo("Superuser creation failed.")
|
||||
|
||||
|
||||
# command to copy the langflow database from the cache to the current directory
|
||||
|
|
@ -749,6 +848,7 @@ def api_key(
|
|||
settings_service = get_settings_service()
|
||||
auth_settings = settings_service.auth_settings
|
||||
if not auth_settings.AUTO_LOGIN:
|
||||
# TODO: Allow non-auto-login users to create API keys via CLI
|
||||
typer.echo("Auto login is disabled. API keys cannot be created through the CLI.")
|
||||
return None
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,122 @@
|
|||
"""Encrypt existing MCP auth_settings credentials
|
||||
|
||||
Revision ID: 0882f9657f22
|
||||
Revises: 1cb603706752
|
||||
Create Date: 2025-08-21 20:11:26.504681
|
||||
|
||||
"""
|
||||
import json
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
from langflow.utils import migration
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = '0882f9657f22'
|
||||
down_revision: Union[str, None] = '1cb603706752'
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
"""Encrypt sensitive fields in existing auth_settings data."""
|
||||
conn = op.get_bind()
|
||||
|
||||
# Import encryption utilities
|
||||
try:
|
||||
from langflow.services.auth.mcp_encryption import encrypt_auth_settings
|
||||
from langflow.services.deps import get_settings_service
|
||||
|
||||
# Check if the folder table exists
|
||||
inspector = sa.inspect(conn)
|
||||
if 'folder' not in inspector.get_table_names():
|
||||
return
|
||||
|
||||
# Query all folders with auth_settings
|
||||
result = conn.execute(
|
||||
sa.text("SELECT id, auth_settings FROM folder WHERE auth_settings IS NOT NULL")
|
||||
)
|
||||
|
||||
# Encrypt auth_settings for each folder
|
||||
for row in result:
|
||||
folder_id = row.id
|
||||
auth_settings = row.auth_settings
|
||||
|
||||
if auth_settings:
|
||||
try:
|
||||
# Parse JSON if it's a string
|
||||
if isinstance(auth_settings, str):
|
||||
auth_settings_dict = json.loads(auth_settings)
|
||||
else:
|
||||
auth_settings_dict = auth_settings
|
||||
|
||||
# Encrypt sensitive fields
|
||||
encrypted_settings = encrypt_auth_settings(auth_settings_dict)
|
||||
|
||||
# Update the record with encrypted data
|
||||
if encrypted_settings:
|
||||
conn.execute(
|
||||
sa.text("UPDATE folder SET auth_settings = :auth_settings WHERE id = :id"),
|
||||
{"auth_settings": json.dumps(encrypted_settings), "id": folder_id}
|
||||
)
|
||||
except Exception as e:
|
||||
# Log the error but continue with other records
|
||||
print(f"Warning: Failed to encrypt auth_settings for folder {folder_id}: {e}")
|
||||
|
||||
except ImportError as e:
|
||||
# If encryption utilities are not available, skip the migration
|
||||
print(f"Warning: Encryption utilities not available, skipping encryption migration: {e}")
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
"""Decrypt sensitive fields in auth_settings data (for rollback)."""
|
||||
conn = op.get_bind()
|
||||
|
||||
# Import decryption utilities
|
||||
try:
|
||||
from langflow.services.auth.mcp_encryption import decrypt_auth_settings
|
||||
from langflow.services.deps import get_settings_service
|
||||
|
||||
# Check if the folder table exists
|
||||
inspector = sa.inspect(conn)
|
||||
if 'folder' not in inspector.get_table_names():
|
||||
return
|
||||
|
||||
# Query all folders with auth_settings
|
||||
result = conn.execute(
|
||||
sa.text("SELECT id, auth_settings FROM folder WHERE auth_settings IS NOT NULL")
|
||||
)
|
||||
|
||||
# Decrypt auth_settings for each folder
|
||||
for row in result:
|
||||
folder_id = row.id
|
||||
auth_settings = row.auth_settings
|
||||
|
||||
if auth_settings:
|
||||
try:
|
||||
# Parse JSON if it's a string
|
||||
if isinstance(auth_settings, str):
|
||||
auth_settings_dict = json.loads(auth_settings)
|
||||
else:
|
||||
auth_settings_dict = auth_settings
|
||||
|
||||
# Decrypt sensitive fields
|
||||
decrypted_settings = decrypt_auth_settings(auth_settings_dict)
|
||||
|
||||
# Update the record with decrypted data
|
||||
if decrypted_settings:
|
||||
conn.execute(
|
||||
sa.text("UPDATE folder SET auth_settings = :auth_settings WHERE id = :id"),
|
||||
{"auth_settings": json.dumps(decrypted_settings), "id": folder_id}
|
||||
)
|
||||
except Exception as e:
|
||||
# Log the error but continue with other records
|
||||
print(f"Warning: Failed to decrypt auth_settings for folder {folder_id}: {e}")
|
||||
|
||||
except ImportError as e:
|
||||
# If decryption utilities are not available, skip the migration
|
||||
print(f"Warning: Decryption utilities not available, skipping decryption migration: {e}")
|
||||
|
|
@ -0,0 +1,279 @@
|
|||
"""Modify uniqueness constraint on file names
|
||||
|
||||
Revision ID: 1cb603706752
|
||||
Revises: 3162e83e485f
|
||||
Create Date: 2025-07-24 07:02:14.896583
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
from typing import Sequence, Union, Iterable, Optional, Set, Tuple
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import inspect
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "1cb603706752"
|
||||
down_revision: Union[str, None] = "3162e83e485f"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Behavior constants
|
||||
DUPLICATE_SUFFIX_START = 2 # first suffix to use, e.g., "name_2.ext"
|
||||
BATCH_SIZE = 1000 # Process duplicates in batches for large datasets
|
||||
|
||||
|
||||
def _get_unique_constraints_by_columns(
|
||||
inspector, table: str, expected_cols: Iterable[str]
|
||||
) -> Optional[str]:
|
||||
"""Return the name of a unique constraint that matches the exact set of expected columns."""
|
||||
expected = set(expected_cols)
|
||||
for c in inspector.get_unique_constraints(table):
|
||||
cols = set(c.get("column_names") or [])
|
||||
if cols == expected:
|
||||
return c.get("name")
|
||||
return None
|
||||
|
||||
|
||||
def _split_base_ext(name: str) -> Tuple[str, str]:
|
||||
"""Split a filename into (base, ext) where ext does not include the leading dot; ext may be ''."""
|
||||
if "." in name:
|
||||
base, ext = name.rsplit(".", 1)
|
||||
return base, ext
|
||||
return name, ""
|
||||
|
||||
|
||||
def _escape_like(s: str) -> str:
|
||||
# escape backslash first, then SQL LIKE wildcards
|
||||
return s.replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
|
||||
|
||||
|
||||
def _like_for_suffixes(base: str, ext: str) -> str:
|
||||
eb = _escape_like(base)
|
||||
if ext:
|
||||
ex = ext.replace("%", r"\%").replace("_", r"\_")
|
||||
return f"{eb}\\_%." + ex # literal underscore
|
||||
else:
|
||||
return f"{eb}\\_%"
|
||||
|
||||
|
||||
def _next_available_name(conn, user_id: str, base_name: str) -> str:
|
||||
"""
|
||||
Compute the next available non-conflicting name for a given user.
|
||||
Handles names with or without extensions and existing _N suffixes.
|
||||
"""
|
||||
base, ext = _split_base_ext(base_name)
|
||||
|
||||
# Load all sibling names once
|
||||
rows = conn.execute(
|
||||
sa.text("""
|
||||
SELECT name
|
||||
FROM file
|
||||
WHERE user_id = :uid
|
||||
AND (name = :base_name OR name LIKE :like ESCAPE '\\')
|
||||
"""),
|
||||
{"uid": user_id, "base_name": base_name, "like": _like_for_suffixes(base, ext)},
|
||||
).scalars().all()
|
||||
|
||||
taken: Set[str] = set(rows)
|
||||
|
||||
# Pattern to detect base_N(.ext) and capture N
|
||||
if ext:
|
||||
rx = re.compile(rf"^{re.escape(base)}_(\d+)\.{re.escape(ext)}$")
|
||||
else:
|
||||
rx = re.compile(rf"^{re.escape(base)}_(\d+)$")
|
||||
|
||||
max_n = 1
|
||||
for n in rows:
|
||||
m = rx.match(n)
|
||||
if m:
|
||||
max_n = max(max_n, int(m.group(1)))
|
||||
|
||||
n = max(max_n + 1, DUPLICATE_SUFFIX_START)
|
||||
while True:
|
||||
candidate = f"{base}_{n}.{ext}" if ext else f"{base}_{n}"
|
||||
if candidate not in taken:
|
||||
return candidate
|
||||
n += 1
|
||||
|
||||
|
||||
def _handle_duplicates_before_upgrade(conn) -> None:
|
||||
"""
|
||||
Ensure (user_id, name) is unique by renaming older duplicates before adding the composite unique constraint.
|
||||
Keeps the most recently updated/created/id-highest record; renames the rest with _N suffix.
|
||||
"""
|
||||
logger.info("Scanning for duplicate file names per user...")
|
||||
duplicates = conn.execute(
|
||||
sa.text(
|
||||
"""
|
||||
SELECT user_id, name, COUNT(*) AS cnt
|
||||
FROM file
|
||||
GROUP BY user_id, name
|
||||
HAVING COUNT(*) > 1
|
||||
"""
|
||||
)
|
||||
).fetchall()
|
||||
|
||||
if not duplicates:
|
||||
logger.info("No duplicates found.")
|
||||
return
|
||||
|
||||
logger.info("Found %d duplicate sets. Resolving...", len(duplicates))
|
||||
|
||||
# Add progress indicator for large datasets
|
||||
if len(duplicates) > 100:
|
||||
logger.info("Large number of duplicates detected. This may take several minutes...")
|
||||
|
||||
# Wrap in a nested transaction so we fail cleanly on any error
|
||||
with conn.begin_nested():
|
||||
# Process duplicates in batches for better performance on large datasets
|
||||
for batch_start in range(0, len(duplicates), BATCH_SIZE):
|
||||
batch_end = min(batch_start + BATCH_SIZE, len(duplicates))
|
||||
batch = duplicates[batch_start:batch_end]
|
||||
|
||||
if len(duplicates) > BATCH_SIZE:
|
||||
logger.info("Processing batch %d-%d of %d duplicate sets...",
|
||||
batch_start + 1, batch_end, len(duplicates))
|
||||
|
||||
for user_id, name, cnt in batch:
|
||||
logger.debug("Resolving duplicates for user=%s, name=%r (count=%s)", user_id, name, cnt)
|
||||
|
||||
file_ids = conn.execute(
|
||||
sa.text(
|
||||
"""
|
||||
SELECT id
|
||||
FROM file
|
||||
WHERE user_id = :uid AND name = :name
|
||||
ORDER BY updated_at DESC, created_at DESC, id DESC
|
||||
"""
|
||||
),
|
||||
{"uid": user_id, "name": name},
|
||||
).scalars().all()
|
||||
|
||||
# Keep the first (most recent), rename the rest
|
||||
for file_id in file_ids[1:]:
|
||||
new_name = _next_available_name(conn, user_id, name)
|
||||
conn.execute(
|
||||
sa.text("UPDATE file SET name = :new_name WHERE id = :fid"),
|
||||
{"new_name": new_name, "fid": file_id},
|
||||
)
|
||||
logger.debug("Renamed id=%s: %r -> %r", file_id, name, new_name)
|
||||
|
||||
# Progress update for large batches
|
||||
if len(duplicates) > BATCH_SIZE and batch_end < len(duplicates):
|
||||
logger.info("Completed %d of %d duplicate sets (%.1f%%)",
|
||||
batch_end, len(duplicates), (batch_end / len(duplicates)) * 100)
|
||||
|
||||
logger.info("Duplicate resolution completed.")
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
start_time = time.time()
|
||||
logger.info("Starting upgrade: adding composite unique (name, user_id) on file")
|
||||
|
||||
conn = op.get_bind()
|
||||
inspector = inspect(conn)
|
||||
|
||||
# 1) Resolve pre-existing duplicates so the new unique can be created
|
||||
duplicate_start = time.time()
|
||||
_handle_duplicates_before_upgrade(conn)
|
||||
duplicate_duration = time.time() - duplicate_start
|
||||
|
||||
if duplicate_duration > 1.0: # Only log if it took more than 1 second
|
||||
logger.info("Duplicate resolution completed in %.2f seconds", duplicate_duration)
|
||||
|
||||
# 2) Detect existing single-column unique on name (if any)
|
||||
inspector = inspect(conn) # refresh inspector
|
||||
single_name_uc = _get_unique_constraints_by_columns(inspector, "file", {"name"})
|
||||
composite_uc = _get_unique_constraints_by_columns(inspector, "file", {"name", "user_id"})
|
||||
|
||||
# 3) Use a unified, reflection-based batch_alter_table for both Postgres and SQLite.
|
||||
# recreate="always" ensures a safe table rebuild on SQLite and a standard alter on Postgres.
|
||||
constraint_start = time.time()
|
||||
with op.batch_alter_table("file", recreate="always") as batch_op:
|
||||
# Drop old single-column unique if present
|
||||
if single_name_uc:
|
||||
logger.info("Dropping existing single-column unique: %s", single_name_uc)
|
||||
batch_op.drop_constraint(single_name_uc, type_="unique")
|
||||
|
||||
# Create composite unique if not already present
|
||||
if not composite_uc:
|
||||
logger.info("Creating composite unique: file_name_user_id_key on (name, user_id)")
|
||||
batch_op.create_unique_constraint("file_name_user_id_key", ["name", "user_id"])
|
||||
else:
|
||||
logger.info("Composite unique already present: %s", composite_uc)
|
||||
|
||||
constraint_duration = time.time() - constraint_start
|
||||
if constraint_duration > 1.0: # Only log if it took more than 1 second
|
||||
logger.info("Constraint operations completed in %.2f seconds", constraint_duration)
|
||||
|
||||
total_duration = time.time() - start_time
|
||||
logger.info("Upgrade completed successfully in %.2f seconds", total_duration)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
start_time = time.time()
|
||||
logger.info("Starting downgrade: reverting to single-column unique on (name)")
|
||||
|
||||
conn = op.get_bind()
|
||||
inspector = inspect(conn)
|
||||
|
||||
# 1) Ensure no cross-user duplicates on name (since we'll enforce global uniqueness on name)
|
||||
logger.info("Checking for cross-user duplicate names prior to downgrade...")
|
||||
validation_start = time.time()
|
||||
|
||||
dup_names = conn.execute(
|
||||
sa.text(
|
||||
"""
|
||||
SELECT name, COUNT(*) AS cnt
|
||||
FROM file
|
||||
GROUP BY name
|
||||
HAVING COUNT(*) > 1
|
||||
"""
|
||||
)
|
||||
).fetchall()
|
||||
|
||||
validation_duration = time.time() - validation_start
|
||||
if validation_duration > 1.0: # Only log if it took more than 1 second
|
||||
logger.info("Validation completed in %.2f seconds", validation_duration)
|
||||
|
||||
if dup_names:
|
||||
examples = [row[0] for row in dup_names[:10]]
|
||||
raise RuntimeError(
|
||||
"Downgrade aborted: duplicate names exist across users. "
|
||||
f"Examples: {examples}{'...' if len(dup_names) > 10 else ''}. "
|
||||
"Rename conflicting files before downgrading."
|
||||
)
|
||||
|
||||
# 2) Detect constraints
|
||||
inspector = inspect(conn) # refresh
|
||||
composite_uc = _get_unique_constraints_by_columns(inspector, "file", {"name", "user_id"})
|
||||
single_name_uc = _get_unique_constraints_by_columns(inspector, "file", {"name"})
|
||||
|
||||
# 3) Perform alteration using batch with reflect to preserve other objects
|
||||
constraint_start = time.time()
|
||||
with op.batch_alter_table("file", recreate="always") as batch_op:
|
||||
if composite_uc:
|
||||
logger.info("Dropping composite unique: %s", composite_uc)
|
||||
batch_op.drop_constraint(composite_uc, type_="unique")
|
||||
else:
|
||||
logger.info("No composite unique found to drop.")
|
||||
|
||||
if not single_name_uc:
|
||||
logger.info("Creating single-column unique: file_name_key on (name)")
|
||||
batch_op.create_unique_constraint("file_name_key", ["name"])
|
||||
else:
|
||||
logger.info("Single-column unique already present: %s", single_name_uc)
|
||||
|
||||
constraint_duration = time.time() - constraint_start
|
||||
if constraint_duration > 1.0: # Only log if it took more than 1 second
|
||||
logger.info("Constraint operations completed in %.2f seconds", constraint_duration)
|
||||
|
||||
total_duration = time.time() - start_time
|
||||
logger.info("Downgrade completed successfully in %.2f seconds", total_duration)
|
||||
|
|
@ -6,19 +6,19 @@ Create Date: 2024-04-12 18:11:06.454037
|
|||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
from collections.abc import Sequence
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
from loguru import logger
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
from langflow.logging.logger import logger
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "4e5980a44eaa"
|
||||
down_revision: Union[str, None] = "79e675cb6752"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
down_revision: str | None = "79e675cb6752"
|
||||
branch_labels: str | Sequence[str] | None = None
|
||||
depends_on: str | Sequence[str] | None = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
|
|
@ -37,11 +37,10 @@ def upgrade() -> None:
|
|||
type_=sa.DateTime(timezone=True),
|
||||
existing_nullable=False,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
if "variable" in table_names:
|
||||
columns = inspector.get_columns("variable")
|
||||
created_at_column = next((column for column in columns if column["name"] == "created_at"), None)
|
||||
|
|
@ -54,11 +53,10 @@ def upgrade() -> None:
|
|||
type_=sa.DateTime(timezone=True),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
if updated_at_column is not None and isinstance(updated_at_column["type"], postgresql.TIMESTAMP):
|
||||
batch_op.alter_column(
|
||||
"updated_at",
|
||||
|
|
@ -66,11 +64,10 @@ def upgrade() -> None:
|
|||
type_=sa.DateTime(timezone=True),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
if updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
|
@ -92,11 +89,10 @@ def downgrade() -> None:
|
|||
type_=postgresql.TIMESTAMP(),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
if updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
if created_at_column is not None and isinstance(created_at_column["type"], sa.DateTime):
|
||||
batch_op.alter_column(
|
||||
"created_at",
|
||||
|
|
@ -104,11 +100,10 @@ def downgrade() -> None:
|
|||
type_=postgresql.TIMESTAMP(),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
|
||||
if "apikey" in table_names:
|
||||
columns = inspector.get_columns("apikey")
|
||||
|
|
@ -121,10 +116,9 @@ def downgrade() -> None:
|
|||
type_=postgresql.TIMESTAMP(),
|
||||
existing_nullable=False,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
|
|
|||
|
|
@ -6,16 +6,16 @@ Create Date: 2024-04-13 10:57:23.061709
|
|||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
from collections.abc import Sequence
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
from loguru import logger
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
down_revision: Union[str, None] = "4e5980a44eaa"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
from langflow.logging.logger import logger
|
||||
|
||||
down_revision: str | None = "4e5980a44eaa"
|
||||
branch_labels: str | Sequence[str] | None = None
|
||||
depends_on: str | Sequence[str] | None = None
|
||||
|
||||
# Revision identifiers, used by Alembic.
|
||||
revision = "58b28437a398"
|
||||
|
|
|
|||
|
|
@ -6,19 +6,19 @@ Create Date: 2024-04-11 19:23:10.697335
|
|||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
from collections.abc import Sequence
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
from loguru import logger
|
||||
from sqlalchemy.dialects import postgresql
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
from langflow.logging.logger import logger
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "79e675cb6752"
|
||||
down_revision: Union[str, None] = "e3bc869fa272"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
down_revision: str | None = "e3bc869fa272"
|
||||
branch_labels: str | Sequence[str] | None = None
|
||||
depends_on: str | Sequence[str] | None = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
|
|
@ -37,11 +37,10 @@ def upgrade() -> None:
|
|||
type_=sa.DateTime(timezone=True),
|
||||
existing_nullable=False,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
if "variable" in table_names:
|
||||
columns = inspector.get_columns("variable")
|
||||
created_at_column = next((column for column in columns if column["name"] == "created_at"), None)
|
||||
|
|
@ -54,11 +53,10 @@ def upgrade() -> None:
|
|||
type_=sa.DateTime(timezone=True),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
if updated_at_column is not None and isinstance(updated_at_column["type"], postgresql.TIMESTAMP):
|
||||
batch_op.alter_column(
|
||||
"updated_at",
|
||||
|
|
@ -66,11 +64,10 @@ def upgrade() -> None:
|
|||
type_=sa.DateTime(timezone=True),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
if updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
|
@ -92,11 +89,10 @@ def downgrade() -> None:
|
|||
type_=postgresql.TIMESTAMP(),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
if updated_at_column is None:
|
||||
logger.warning("Column 'updated_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'updated_at' has type {updated_at_column['type']} in table 'variable'")
|
||||
if created_at_column is not None and isinstance(created_at_column["type"], sa.DateTime):
|
||||
batch_op.alter_column(
|
||||
"created_at",
|
||||
|
|
@ -104,11 +100,10 @@ def downgrade() -> None:
|
|||
type_=postgresql.TIMESTAMP(),
|
||||
existing_nullable=True,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'variable'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'variable'")
|
||||
|
||||
if "apikey" in table_names:
|
||||
columns = inspector.get_columns("apikey")
|
||||
|
|
@ -121,10 +116,9 @@ def downgrade() -> None:
|
|||
type_=postgresql.TIMESTAMP(),
|
||||
existing_nullable=False,
|
||||
)
|
||||
elif created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
if created_at_column is None:
|
||||
logger.warning("Column 'created_at' not found in table 'apikey'")
|
||||
else:
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
logger.warning(f"Column 'created_at' has type {created_at_column['type']} in table 'apikey'")
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
"""Add unique constraints
|
||||
"""Add unique constraints.
|
||||
|
||||
Revision ID: b2fa308044b5
|
||||
Revises: 0b8757876a7c
|
||||
|
|
@ -6,25 +6,25 @@ Create Date: 2024-01-26 13:31:14.797548
|
|||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
from collections.abc import Sequence
|
||||
|
||||
import sqlalchemy as sa
|
||||
import sqlmodel
|
||||
from alembic import op
|
||||
from loguru import logger # noqa
|
||||
from sqlalchemy.engine.reflection import Inspector
|
||||
|
||||
from langflow.logging.logger import logger
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "b2fa308044b5"
|
||||
down_revision: Union[str, None] = "0b8757876a7c"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
down_revision: str | None = "0b8757876a7c"
|
||||
branch_labels: str | Sequence[str] | None = None
|
||||
depends_on: str | Sequence[str] | None = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
conn = op.get_bind()
|
||||
inspector = sa.inspect(conn) # type: ignore
|
||||
inspector = sa.inspect(conn)
|
||||
tables = inspector.get_table_names()
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
try:
|
||||
|
|
@ -53,14 +53,13 @@ def upgrade() -> None:
|
|||
if "fk_flow_user_id_user" not in constraint_names:
|
||||
batch_op.create_foreign_key("fk_flow_user_id_user", "user", ["user_id"], ["id"])
|
||||
|
||||
except Exception as e:
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.exception(f"Error during upgrade: {e}")
|
||||
pass
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
conn = op.get_bind()
|
||||
inspector = sa.inspect(conn) # type: ignore
|
||||
inspector = sa.inspect(conn)
|
||||
try:
|
||||
# Re-create the dropped table 'flowstyle' if it was previously dropped in upgrade
|
||||
if "flowstyle" not in inspector.get_table_names():
|
||||
|
|
@ -97,6 +96,6 @@ def downgrade() -> None:
|
|||
if "fk_flow_user_id_user" in constraint_names:
|
||||
batch_op.drop_constraint("fk_flow_user_id_user", type_="foreignkey")
|
||||
|
||||
except Exception as e:
|
||||
except Exception as e: # noqa: BLE001
|
||||
# It's generally a good idea to log the exception or handle it in a way other than a bare pass
|
||||
print(f"Error during downgrade: {e}")
|
||||
logger.exception(f"Error during downgrade: {e}")
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ import uuid
|
|||
from collections.abc import AsyncIterator
|
||||
|
||||
from fastapi import BackgroundTasks, HTTPException, Response
|
||||
from loguru import logger
|
||||
from sqlmodel import select
|
||||
|
||||
from langflow.api.disconnect import DisconnectHandlerStreamingResponse
|
||||
|
|
@ -20,16 +19,12 @@ from langflow.api.utils import (
|
|||
get_top_level_vertices,
|
||||
parse_exception,
|
||||
)
|
||||
from langflow.api.v1.schemas import (
|
||||
FlowDataRequest,
|
||||
InputValueRequest,
|
||||
ResultDataResponse,
|
||||
VertexBuildResponse,
|
||||
)
|
||||
from langflow.api.v1.schemas import FlowDataRequest, InputValueRequest, ResultDataResponse, VertexBuildResponse
|
||||
from langflow.events.event_manager import EventManager
|
||||
from langflow.exceptions.component import ComponentBuildError
|
||||
from langflow.graph.graph.base import Graph
|
||||
from langflow.graph.utils import log_vertex_build
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.message import ErrorMessage
|
||||
from langflow.schema.schema import OutputValue
|
||||
from langflow.services.database.models.flow.model import Flow
|
||||
|
|
@ -75,7 +70,7 @@ async def start_flow_build(
|
|||
)
|
||||
queue_service.start_job(job_id, task_coro)
|
||||
except Exception as e:
|
||||
logger.exception("Failed to create queue and start task")
|
||||
await logger.aexception("Failed to create queue and start task")
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
return job_id
|
||||
|
||||
|
|
@ -91,7 +86,7 @@ async def get_flow_events_response(
|
|||
main_queue, event_manager, event_task, _ = queue_service.get_queue_data(job_id)
|
||||
if event_delivery in (EventDeliveryType.STREAMING, EventDeliveryType.DIRECT):
|
||||
if event_task is None:
|
||||
logger.error(f"No event task found for job {job_id}")
|
||||
await logger.aerror(f"No event task found for job {job_id}")
|
||||
raise HTTPException(status_code=404, detail="No event task found for job")
|
||||
return await create_flow_response(
|
||||
queue=main_queue,
|
||||
|
|
@ -130,19 +125,19 @@ async def get_flow_events_response(
|
|||
content = "\n".join([event for event in events if event is not None])
|
||||
return Response(content=content, media_type="application/x-ndjson")
|
||||
except asyncio.CancelledError as exc:
|
||||
logger.info(f"Event polling was cancelled for job {job_id}")
|
||||
await logger.ainfo(f"Event polling was cancelled for job {job_id}")
|
||||
raise HTTPException(status_code=499, detail="Event polling was cancelled") from exc
|
||||
except asyncio.TimeoutError:
|
||||
logger.warning(f"Timeout while waiting for events for job {job_id}")
|
||||
await logger.awarning(f"Timeout while waiting for events for job {job_id}")
|
||||
return Response(content="", media_type="application/x-ndjson") # Return empty response instead of error
|
||||
|
||||
except JobQueueNotFoundError as exc:
|
||||
logger.error(f"Job not found: {job_id}. Error: {exc!s}")
|
||||
await logger.aerror(f"Job not found: {job_id}. Error: {exc!s}")
|
||||
raise HTTPException(status_code=404, detail=f"Job not found: {exc!s}") from exc
|
||||
except Exception as exc:
|
||||
if isinstance(exc, HTTPException):
|
||||
raise
|
||||
logger.exception(f"Unexpected error processing flow events for job {job_id}")
|
||||
await logger.aexception(f"Unexpected error processing flow events for job {job_id}")
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error: {exc!s}") from exc
|
||||
|
||||
|
||||
|
|
@ -161,9 +156,9 @@ async def create_flow_response(
|
|||
break
|
||||
get_time = time.time()
|
||||
yield value.decode("utf-8")
|
||||
logger.debug(f"Event {event_id} consumed in {get_time - put_time:.4f}s")
|
||||
await logger.adebug(f"Event {event_id} consumed in {get_time - put_time:.4f}s")
|
||||
except Exception as exc: # noqa: BLE001
|
||||
logger.exception(f"Error consuming event: {exc}")
|
||||
await logger.aexception(f"Error consuming event: {exc}")
|
||||
break
|
||||
|
||||
def on_disconnect() -> None:
|
||||
|
|
@ -233,7 +228,7 @@ async def generate_flow_events(
|
|||
|
||||
if "stream or streaming set to True" in str(exc):
|
||||
raise HTTPException(status_code=400, detail=str(exc)) from exc
|
||||
logger.exception("Error checking build status")
|
||||
await logger.aexception("Error checking build status")
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
return first_layer, vertices_to_run, graph
|
||||
|
||||
|
|
@ -317,7 +312,7 @@ async def generate_flow_events(
|
|||
tb = exc.formatted_traceback
|
||||
else:
|
||||
tb = traceback.format_exc()
|
||||
logger.exception("Error building Component")
|
||||
await logger.aexception("Error building Component")
|
||||
params = format_exception_message(exc)
|
||||
message = {"errorMessage": params, "stackTrace": tb}
|
||||
valid = False
|
||||
|
|
@ -390,7 +385,7 @@ async def generate_flow_events(
|
|||
component_error_message=str(exc),
|
||||
),
|
||||
)
|
||||
logger.exception("Error building Component")
|
||||
await logger.aexception("Error building Component")
|
||||
message = parse_exception(exc)
|
||||
raise HTTPException(status_code=500, detail=message) from exc
|
||||
|
||||
|
|
@ -411,7 +406,7 @@ async def generate_flow_events(
|
|||
try:
|
||||
vertex_build_response: VertexBuildResponse = await _build_vertex(vertex_id, graph, event_manager)
|
||||
except asyncio.CancelledError as exc:
|
||||
logger.error(f"Build cancelled: {exc}")
|
||||
await logger.aerror(f"Build cancelled: {exc}")
|
||||
raise
|
||||
|
||||
# send built event or error event
|
||||
|
|
@ -459,7 +454,7 @@ async def generate_flow_events(
|
|||
background_tasks.add_task(graph.end_all_traces_in_context())
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error building vertices: {e}")
|
||||
await logger.aerror(f"Error building vertices: {e}")
|
||||
custom_component = graph.get_vertex(vertex_id).custom_component
|
||||
trace_name = getattr(custom_component, "trace_name", None)
|
||||
error_message = ErrorMessage(
|
||||
|
|
@ -499,11 +494,11 @@ async def cancel_flow_build(
|
|||
_, _, event_task, _ = queue_service.get_queue_data(job_id)
|
||||
|
||||
if event_task is None:
|
||||
logger.warning(f"No event task found for job_id {job_id}")
|
||||
await logger.awarning(f"No event task found for job_id {job_id}")
|
||||
return True # Nothing to cancel is still a success
|
||||
|
||||
if event_task.done():
|
||||
logger.info(f"Task for job_id {job_id} is already completed")
|
||||
await logger.ainfo(f"Task for job_id {job_id} is already completed")
|
||||
return True # Nothing to cancel is still a success
|
||||
|
||||
# Store the task reference to check status after cleanup
|
||||
|
|
@ -515,18 +510,18 @@ async def cancel_flow_build(
|
|||
except asyncio.CancelledError:
|
||||
# Check if the task was actually cancelled
|
||||
if task_before_cleanup.cancelled():
|
||||
logger.info(f"Successfully cancelled flow build for job_id {job_id} (CancelledError caught)")
|
||||
await logger.ainfo(f"Successfully cancelled flow build for job_id {job_id} (CancelledError caught)")
|
||||
return True
|
||||
# If the task wasn't cancelled, re-raise the exception
|
||||
logger.error(f"CancelledError caught but task for job_id {job_id} was not cancelled")
|
||||
await logger.aerror(f"CancelledError caught but task for job_id {job_id} was not cancelled")
|
||||
raise
|
||||
|
||||
# If no exception was raised, verify that the task was actually cancelled
|
||||
# The task should be done (cancelled) after cleanup
|
||||
if task_before_cleanup.cancelled():
|
||||
logger.info(f"Successfully cancelled flow build for job_id {job_id}")
|
||||
await logger.ainfo(f"Successfully cancelled flow build for job_id {job_id}")
|
||||
return True
|
||||
|
||||
# If we get here, the task wasn't cancelled properly
|
||||
logger.error(f"Failed to cancel flow build for job_id {job_id}, task is still running")
|
||||
await logger.aerror(f"Failed to cancel flow build for job_id {job_id}, task is still running")
|
||||
return False
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
import uuid
|
||||
|
||||
from fastapi import APIRouter, HTTPException, status
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel
|
||||
from sqlmodel import select
|
||||
|
||||
from langflow.api.utils import DbSession
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.services.database.models.flow.model import Flow
|
||||
from langflow.services.deps import get_chat_service
|
||||
|
||||
|
|
@ -49,7 +49,7 @@ async def health_check(
|
|||
(await session.exec(stmt)).first()
|
||||
response.db = "ok"
|
||||
except Exception: # noqa: BLE001
|
||||
logger.exception("Error checking database")
|
||||
await logger.aexception("Error checking database")
|
||||
|
||||
try:
|
||||
chat = get_chat_service()
|
||||
|
|
@ -57,7 +57,7 @@ async def health_check(
|
|||
await chat.get_cache("health_check")
|
||||
response.chat = "ok"
|
||||
except Exception: # noqa: BLE001
|
||||
logger.exception("Error checking chat service")
|
||||
await logger.aexception("Error checking chat service")
|
||||
|
||||
if response.has_error():
|
||||
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=response.model_dump())
|
||||
|
|
|
|||
|
|
@ -8,10 +8,12 @@ from langflow.api.v1 import (
|
|||
files_router,
|
||||
flows_router,
|
||||
folders_router,
|
||||
knowledge_bases_router,
|
||||
login_router,
|
||||
mcp_projects_router,
|
||||
mcp_router,
|
||||
monitor_router,
|
||||
openai_responses_router,
|
||||
projects_router,
|
||||
starter_projects_router,
|
||||
store_router,
|
||||
|
|
@ -45,9 +47,11 @@ router_v1.include_router(monitor_router)
|
|||
router_v1.include_router(folders_router)
|
||||
router_v1.include_router(projects_router)
|
||||
router_v1.include_router(starter_projects_router)
|
||||
router_v1.include_router(knowledge_bases_router)
|
||||
router_v1.include_router(mcp_router)
|
||||
router_v1.include_router(voice_mode_router)
|
||||
router_v1.include_router(mcp_projects_router)
|
||||
router_v1.include_router(openai_responses_router)
|
||||
|
||||
router_v2.include_router(files_router_v2)
|
||||
router_v2.include_router(mcp_router_v2)
|
||||
|
|
|
|||
|
|
@ -8,11 +8,11 @@ from typing import TYPE_CHECKING, Annotated, Any
|
|||
|
||||
from fastapi import Depends, HTTPException, Query
|
||||
from fastapi_pagination import Params
|
||||
from loguru import logger
|
||||
from sqlalchemy import delete
|
||||
from sqlmodel.ext.asyncio.session import AsyncSession
|
||||
|
||||
from langflow.graph.graph.base import Graph
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.services.auth.utils import get_current_active_user, get_current_active_user_mcp
|
||||
from langflow.services.database.models.flow.model import Flow
|
||||
from langflow.services.database.models.message.model import MessageTable
|
||||
|
|
@ -119,7 +119,7 @@ async def check_langflow_version(component: StoreComponentCreate) -> None:
|
|||
if langflow_version is None:
|
||||
raise HTTPException(status_code=500, detail="Unable to verify the latest version of Langflow")
|
||||
if langflow_version != component.last_tested_version:
|
||||
logger.warning(
|
||||
await logger.awarning(
|
||||
f"Your version of Langflow ({component.last_tested_version}) is outdated. "
|
||||
f"Please update to the latest version ({langflow_version}) and try again."
|
||||
)
|
||||
|
|
@ -371,7 +371,7 @@ async def verify_public_flow_and_get_user(flow_id: uuid.UUID, client_id: str | N
|
|||
user = await get_user_by_flow_id_or_endpoint_name(str(flow_id))
|
||||
|
||||
except Exception as exc:
|
||||
logger.exception(f"Error getting user for public flow {flow_id}")
|
||||
await logger.aexception(f"Error getting user for public flow {flow_id}")
|
||||
raise HTTPException(status_code=403, detail="Flow is not accessible") from exc
|
||||
|
||||
if not user:
|
||||
|
|
|
|||
|
|
@ -4,10 +4,12 @@ from langflow.api.v1.endpoints import router as endpoints_router
|
|||
from langflow.api.v1.files import router as files_router
|
||||
from langflow.api.v1.flows import router as flows_router
|
||||
from langflow.api.v1.folders import router as folders_router
|
||||
from langflow.api.v1.knowledge_bases import router as knowledge_bases_router
|
||||
from langflow.api.v1.login import router as login_router
|
||||
from langflow.api.v1.mcp import router as mcp_router
|
||||
from langflow.api.v1.mcp_projects import router as mcp_projects_router
|
||||
from langflow.api.v1.monitor import router as monitor_router
|
||||
from langflow.api.v1.openai_responses import router as openai_responses_router
|
||||
from langflow.api.v1.projects import router as projects_router
|
||||
from langflow.api.v1.starter_projects import router as starter_projects_router
|
||||
from langflow.api.v1.store import router as store_router
|
||||
|
|
@ -23,10 +25,12 @@ __all__ = [
|
|||
"files_router",
|
||||
"flows_router",
|
||||
"folders_router",
|
||||
"knowledge_bases_router",
|
||||
"login_router",
|
||||
"mcp_projects_router",
|
||||
"mcp_router",
|
||||
"monitor_router",
|
||||
"openai_responses_router",
|
||||
"projects_router",
|
||||
"starter_projects_router",
|
||||
"store_router",
|
||||
|
|
|
|||
|
|
@ -5,10 +5,10 @@ from uuid import UUID
|
|||
|
||||
from langchain_core.agents import AgentAction, AgentFinish
|
||||
from langchain_core.callbacks.base import AsyncCallbackHandler
|
||||
from loguru import logger
|
||||
from typing_extensions import override
|
||||
|
||||
from langflow.api.v1.schemas import ChatResponse, PromptResponse
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.services.deps import get_chat_service, get_socket_service
|
||||
from langflow.utils.util import remove_ansi_escape_codes
|
||||
|
||||
|
|
@ -78,7 +78,7 @@ class AsyncStreamingLLMCallbackHandleSIO(AsyncCallbackHandler):
|
|||
for resp in resps:
|
||||
await self.socketio_service.emit_token(to=self.sid, data=resp.model_dump())
|
||||
except Exception: # noqa: BLE001
|
||||
logger.exception("Error sending response")
|
||||
await logger.aexception("Error sending response")
|
||||
|
||||
async def on_tool_error(
|
||||
self,
|
||||
|
|
|
|||
|
|
@ -6,23 +6,10 @@ import traceback
|
|||
import uuid
|
||||
from typing import TYPE_CHECKING, Annotated
|
||||
|
||||
from fastapi import (
|
||||
APIRouter,
|
||||
BackgroundTasks,
|
||||
Body,
|
||||
Depends,
|
||||
HTTPException,
|
||||
Request,
|
||||
status,
|
||||
)
|
||||
from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException, Request, status
|
||||
from fastapi.responses import StreamingResponse
|
||||
from loguru import logger
|
||||
|
||||
from langflow.api.build import (
|
||||
cancel_flow_build,
|
||||
get_flow_events_response,
|
||||
start_flow_build,
|
||||
)
|
||||
from langflow.api.build import cancel_flow_build, get_flow_events_response, start_flow_build
|
||||
from langflow.api.limited_background_tasks import LimitVertexBuildBackgroundTasks
|
||||
from langflow.api.utils import (
|
||||
CurrentActiveUser,
|
||||
|
|
@ -48,6 +35,7 @@ from langflow.api.v1.schemas import (
|
|||
from langflow.exceptions.component import ComponentBuildError
|
||||
from langflow.graph.graph.base import Graph
|
||||
from langflow.graph.utils import log_vertex_build
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.schema import OutputValue
|
||||
from langflow.services.cache.utils import CacheMiss
|
||||
from langflow.services.chat.service import ChatService
|
||||
|
|
@ -135,7 +123,7 @@ async def retrieve_vertices_order(
|
|||
)
|
||||
if "stream or streaming set to True" in str(exc):
|
||||
raise HTTPException(status_code=400, detail=str(exc)) from exc
|
||||
logger.exception("Error checking build status")
|
||||
await logger.aexception("Error checking build status")
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
|
||||
|
||||
|
|
@ -239,17 +227,17 @@ async def cancel_build(
|
|||
return CancelFlowResponse(success=False, message="Failed to cancel flow build")
|
||||
except asyncio.CancelledError:
|
||||
# If CancelledError reaches here, it means the task was not successfully cancelled
|
||||
logger.error(f"Failed to cancel flow build for job_id {job_id} (CancelledError caught)")
|
||||
await logger.aerror(f"Failed to cancel flow build for job_id {job_id} (CancelledError caught)")
|
||||
return CancelFlowResponse(success=False, message="Failed to cancel flow build")
|
||||
except ValueError as exc:
|
||||
# Job not found
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
|
||||
except JobQueueNotFoundError as exc:
|
||||
logger.error(f"Job not found: {job_id}. Error: {exc!s}")
|
||||
await logger.aerror(f"Job not found: {job_id}. Error: {exc!s}")
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Job not found: {exc!s}") from exc
|
||||
except Exception as exc:
|
||||
# Any other unexpected error
|
||||
logger.exception(f"Error cancelling flow build for job_id {job_id}: {exc}")
|
||||
await logger.aexception(f"Error cancelling flow build for job_id {job_id}: {exc}")
|
||||
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
|
||||
|
||||
|
||||
|
|
@ -297,7 +285,7 @@ async def build_vertex(
|
|||
cache = await chat_service.get_cache(flow_id_str)
|
||||
if isinstance(cache, CacheMiss):
|
||||
# If there's no cache
|
||||
logger.warning(f"No cache found for {flow_id_str}. Building graph starting at {vertex_id}")
|
||||
await logger.awarning(f"No cache found for {flow_id_str}. Building graph starting at {vertex_id}")
|
||||
graph = await build_graph_from_db(
|
||||
flow_id=flow_id,
|
||||
session=await anext(get_session()),
|
||||
|
|
@ -331,7 +319,7 @@ async def build_vertex(
|
|||
tb = exc.formatted_traceback
|
||||
else:
|
||||
tb = traceback.format_exc()
|
||||
logger.exception("Error building Component")
|
||||
await logger.aexception("Error building Component")
|
||||
params = format_exception_message(exc)
|
||||
message = {"errorMessage": params, "stackTrace": tb}
|
||||
valid = False
|
||||
|
|
@ -408,7 +396,7 @@ async def build_vertex(
|
|||
component_error_message=str(exc),
|
||||
),
|
||||
)
|
||||
logger.exception("Error building Component")
|
||||
await logger.aexception("Error building Component")
|
||||
message = parse_exception(exc)
|
||||
raise HTTPException(status_code=500, detail=message) from exc
|
||||
|
||||
|
|
@ -421,14 +409,14 @@ async def _stream_vertex(flow_id: str, vertex_id: str, chat_service: ChatService
|
|||
try:
|
||||
cache = await chat_service.get_cache(flow_id)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
logger.exception("Error building Component")
|
||||
await logger.aexception("Error building Component")
|
||||
yield str(StreamData(event="error", data={"error": str(exc)}))
|
||||
return
|
||||
|
||||
if isinstance(cache, CacheMiss):
|
||||
# If there's no cache
|
||||
msg = f"No cache found for {flow_id}."
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
yield str(StreamData(event="error", data={"error": msg}))
|
||||
return
|
||||
else:
|
||||
|
|
@ -437,13 +425,13 @@ async def _stream_vertex(flow_id: str, vertex_id: str, chat_service: ChatService
|
|||
try:
|
||||
vertex: InterfaceVertex = graph.get_vertex(vertex_id)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
logger.exception("Error building Component")
|
||||
await logger.aexception("Error building Component")
|
||||
yield str(StreamData(event="error", data={"error": str(exc)}))
|
||||
return
|
||||
|
||||
if not hasattr(vertex, "stream"):
|
||||
msg = f"Vertex {vertex_id} does not support streaming"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
yield str(StreamData(event="error", data={"error": msg}))
|
||||
return
|
||||
|
||||
|
|
@ -460,7 +448,7 @@ async def _stream_vertex(flow_id: str, vertex_id: str, chat_service: ChatService
|
|||
yield str(stream_data)
|
||||
|
||||
elif not vertex.frozen or not vertex.built:
|
||||
logger.debug(f"Streaming vertex {vertex_id}")
|
||||
await logger.adebug(f"Streaming vertex {vertex_id}")
|
||||
stream_data = StreamData(
|
||||
event="message",
|
||||
data={"message": f"Streaming vertex {vertex_id}"},
|
||||
|
|
@ -474,7 +462,7 @@ async def _stream_vertex(flow_id: str, vertex_id: str, chat_service: ChatService
|
|||
)
|
||||
yield str(stream_data)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
logger.exception("Error building Component")
|
||||
await logger.aexception("Error building Component")
|
||||
exc_message = parse_exception(exc)
|
||||
if exc_message == "The message must be an iterator or an async iterator.":
|
||||
exc_message = "This stream has already been closed."
|
||||
|
|
@ -487,11 +475,11 @@ async def _stream_vertex(flow_id: str, vertex_id: str, chat_service: ChatService
|
|||
yield str(stream_data)
|
||||
else:
|
||||
msg = f"No result found for vertex {vertex_id}"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
yield str(StreamData(event="error", data={"error": msg}))
|
||||
return
|
||||
finally:
|
||||
logger.debug("Closing stream")
|
||||
await logger.adebug("Closing stream")
|
||||
if graph:
|
||||
await chat_service.set_cache(flow_id, graph)
|
||||
yield str(StreamData(event="close", data={"message": "Stream closed"}))
|
||||
|
|
@ -625,7 +613,7 @@ async def build_public_tmp(
|
|||
flow_name=flow_name or f"{client_id}_{flow_id}",
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.exception("Error building public flow")
|
||||
await logger.aexception("Error building public flow")
|
||||
if isinstance(exc, HTTPException):
|
||||
raise
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ import sqlalchemy as sa
|
|||
from fastapi import APIRouter, BackgroundTasks, Body, Depends, HTTPException, Request, UploadFile, status
|
||||
from fastapi.encoders import jsonable_encoder
|
||||
from fastapi.responses import StreamingResponse
|
||||
from loguru import logger
|
||||
from sqlmodel import select
|
||||
|
||||
from langflow.api.utils import CurrentActiveUser, DbSession, parse_value
|
||||
|
|
@ -39,11 +38,11 @@ from langflow.exceptions.serialization import SerializationError
|
|||
from langflow.graph.graph.base import Graph
|
||||
from langflow.graph.schema import RunOutputs
|
||||
from langflow.helpers.flow import get_flow_by_id_or_endpoint_name
|
||||
from langflow.helpers.user import get_user_by_flow_id_or_endpoint_name
|
||||
from langflow.interface.initialize.loading import update_params_with_load_from_db_fields
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.processing.process import process_tweaks, run_graph_internal
|
||||
from langflow.schema.graph import Tweaks
|
||||
from langflow.services.auth.utils import api_key_security, get_current_active_user
|
||||
from langflow.services.auth.utils import api_key_security, get_current_active_user, get_webhook_user
|
||||
from langflow.services.cache.utils import save_uploaded_file
|
||||
from langflow.services.database.models.flow.model import Flow, FlowRead
|
||||
from langflow.services.database.models.flow.utils import get_all_webhook_components_in_flow
|
||||
|
|
@ -116,6 +115,7 @@ async def simple_run_flow(
|
|||
stream: bool = False,
|
||||
api_key_user: User | None = None,
|
||||
event_manager: EventManager | None = None,
|
||||
context: dict | None = None,
|
||||
):
|
||||
validate_input_and_tweaks(input_request)
|
||||
try:
|
||||
|
|
@ -127,7 +127,9 @@ async def simple_run_flow(
|
|||
raise ValueError(msg)
|
||||
graph_data = flow.data.copy()
|
||||
graph_data = process_tweaks(graph_data, input_request.tweaks or {}, stream=stream)
|
||||
graph = Graph.from_payload(graph_data, flow_id=flow_id_str, user_id=str(user_id), flow_name=flow.name)
|
||||
graph = Graph.from_payload(
|
||||
graph_data, flow_id=flow_id_str, user_id=str(user_id), flow_name=flow.name, context=context
|
||||
)
|
||||
inputs = None
|
||||
if input_request.input_value is not None:
|
||||
inputs = [
|
||||
|
|
@ -184,7 +186,7 @@ async def simple_run_flow_task(
|
|||
)
|
||||
|
||||
except Exception: # noqa: BLE001
|
||||
logger.exception(f"Error running flow {flow.id} task")
|
||||
await logger.aexception(f"Error running flow {flow.id} task")
|
||||
|
||||
|
||||
async def consume_and_yield(queue: asyncio.Queue, client_consumed_queue: asyncio.Queue) -> AsyncGenerator:
|
||||
|
|
@ -215,7 +217,7 @@ async def consume_and_yield(queue: asyncio.Queue, client_consumed_queue: asyncio
|
|||
yield value
|
||||
get_time_yield = time.time()
|
||||
client_consumed_queue.put_nowait(event_id)
|
||||
logger.debug(
|
||||
await logger.adebug(
|
||||
f"consumed event {event_id} "
|
||||
f"(time in queue, {get_time - put_time:.4f}, "
|
||||
f"client {get_time_yield - get_time:.4f})"
|
||||
|
|
@ -228,6 +230,7 @@ async def run_flow_generator(
|
|||
api_key_user: User | None,
|
||||
event_manager: EventManager,
|
||||
client_consumed_queue: asyncio.Queue,
|
||||
context: dict | None = None,
|
||||
) -> None:
|
||||
"""Executes a flow asynchronously and manages event streaming to the client.
|
||||
|
||||
|
|
@ -240,6 +243,7 @@ async def run_flow_generator(
|
|||
api_key_user (User | None): Optional authenticated user running the flow
|
||||
event_manager (EventManager): Manages the streaming of events to the client
|
||||
client_consumed_queue (asyncio.Queue): Tracks client consumption of events
|
||||
context (dict | None): Optional context to pass to the flow
|
||||
|
||||
Events Generated:
|
||||
- "add_message": Sent when new messages are added during flow execution
|
||||
|
|
@ -260,11 +264,12 @@ async def run_flow_generator(
|
|||
stream=True,
|
||||
api_key_user=api_key_user,
|
||||
event_manager=event_manager,
|
||||
context=context,
|
||||
)
|
||||
event_manager.on_end(data={"result": result.model_dump()})
|
||||
await client_consumed_queue.get()
|
||||
except (ValueError, InvalidChatInputError, SerializationError) as e:
|
||||
logger.error(f"Error running flow: {e}")
|
||||
await logger.aerror(f"Error running flow: {e}")
|
||||
event_manager.on_error(data={"error": str(e)})
|
||||
finally:
|
||||
await event_manager.queue.put((None, None, time.time))
|
||||
|
|
@ -331,7 +336,7 @@ async def simplified_run_flow(
|
|||
)
|
||||
|
||||
async def on_disconnect() -> None:
|
||||
logger.debug("Client disconnected, closing tasks")
|
||||
await logger.adebug("Client disconnected, closing tasks")
|
||||
main_task.cancel()
|
||||
|
||||
return StreamingResponse(
|
||||
|
|
@ -393,16 +398,16 @@ async def simplified_run_flow(
|
|||
|
||||
@router.post("/webhook/{flow_id_or_name}", response_model=dict, status_code=HTTPStatus.ACCEPTED) # noqa: RUF100, FAST003
|
||||
async def webhook_run_flow(
|
||||
flow_id_or_name: str,
|
||||
flow: Annotated[Flow, Depends(get_flow_by_id_or_endpoint_name)],
|
||||
user: Annotated[User, Depends(get_user_by_flow_id_or_endpoint_name)],
|
||||
request: Request,
|
||||
background_tasks: BackgroundTasks,
|
||||
):
|
||||
"""Run a flow using a webhook request.
|
||||
|
||||
Args:
|
||||
flow (Flow, optional): The flow to be executed. Defaults to Depends(get_flow_by_id).
|
||||
user (User): The flow user.
|
||||
flow_id_or_name (str): The flow ID or endpoint name.
|
||||
flow (Flow): The flow to be executed.
|
||||
request (Request): The incoming HTTP request.
|
||||
background_tasks (BackgroundTasks): The background tasks manager.
|
||||
|
||||
|
|
@ -414,8 +419,12 @@ async def webhook_run_flow(
|
|||
"""
|
||||
telemetry_service = get_telemetry_service()
|
||||
start_time = time.perf_counter()
|
||||
logger.debug("Received webhook request")
|
||||
await logger.adebug("Received webhook request")
|
||||
error_msg = ""
|
||||
|
||||
# Get the appropriate user for webhook execution based on auth settings
|
||||
webhook_user = await get_webhook_user(flow_id_or_name, request)
|
||||
|
||||
try:
|
||||
try:
|
||||
data = await request.body()
|
||||
|
|
@ -442,12 +451,12 @@ async def webhook_run_flow(
|
|||
session_id=None,
|
||||
)
|
||||
|
||||
logger.debug("Starting background task")
|
||||
await logger.adebug("Starting background task")
|
||||
background_tasks.add_task(
|
||||
simple_run_flow_task,
|
||||
flow=flow,
|
||||
input_request=input_request,
|
||||
api_key_user=user,
|
||||
api_key_user=webhook_user,
|
||||
)
|
||||
except Exception as exc:
|
||||
error_msg = str(exc)
|
||||
|
|
@ -553,7 +562,7 @@ async def experimental_run_flow(
|
|||
except sa.exc.StatementError as exc:
|
||||
# StatementError('(builtins.ValueError) badly formed hexadecimal UUID string')
|
||||
if "badly formed hexadecimal UUID string" in str(exc):
|
||||
logger.error(f"Flow ID {flow_id_str} is not a valid UUID")
|
||||
await logger.aerror(f"Flow ID {flow_id_str} is not a valid UUID")
|
||||
# This means the Flow ID is not a valid UUID which means it can't find the flow
|
||||
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
|
||||
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(exc)) from exc
|
||||
|
|
@ -600,7 +609,7 @@ async def experimental_run_flow(
|
|||
async def process(_flow_id) -> None:
|
||||
"""Endpoint to process an input with a given flow_id."""
|
||||
# Raise a depreciation warning
|
||||
logger.warning(
|
||||
await logger.awarning(
|
||||
"The /process endpoint is deprecated and will be removed in a future version. Please use /run instead."
|
||||
)
|
||||
raise HTTPException(
|
||||
|
|
@ -643,7 +652,7 @@ async def create_upload_file(
|
|||
file_path=file_path,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.exception("Error saving file")
|
||||
await logger.aexception("Error saving file")
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
|
||||
|
||||
|
|
@ -724,7 +733,7 @@ async def custom_component_update(
|
|||
field_value=code_request.field_value,
|
||||
field_name=code_request.field,
|
||||
)
|
||||
if "code" not in updated_build_config:
|
||||
if "code" not in updated_build_config or not updated_build_config.get("code", {}).get("value"):
|
||||
updated_build_config = add_code_field_to_build_config(updated_build_config, code_request.code)
|
||||
component_node["template"] = updated_build_config
|
||||
|
||||
|
|
@ -756,7 +765,7 @@ async def get_config() -> ConfigResponse:
|
|||
"""
|
||||
try:
|
||||
settings_service: SettingsService = get_settings_service()
|
||||
return ConfigResponse.from_settings(settings_service.settings)
|
||||
return ConfigResponse.from_settings(settings_service.settings, settings_service.auth_settings)
|
||||
|
||||
except Exception as exc:
|
||||
raise HTTPException(status_code=500, detail=str(exc)) from exc
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ async def _save_flow_to_fs(flow: Flow) -> None:
|
|||
try:
|
||||
await f.write(flow.model_dump_json())
|
||||
except OSError:
|
||||
logger.exception("Failed to write flow %s to path %s", flow.name, flow.fs_path)
|
||||
await logger.aexception("Failed to write flow %s to path %s", flow.name, flow.fs_path)
|
||||
|
||||
|
||||
async def _new_flow(
|
||||
|
|
|
|||
444
src/backend/base/langflow/api/v1/knowledge_bases.py
Normal file
444
src/backend/base/langflow/api/v1/knowledge_bases.py
Normal file
|
|
@ -0,0 +1,444 @@
|
|||
import json
|
||||
import shutil
|
||||
from http import HTTPStatus
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from langchain_chroma import Chroma
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langflow.api.utils import CurrentActiveUser
|
||||
from langflow.logging import logger
|
||||
from langflow.services.deps import get_settings_service
|
||||
|
||||
router = APIRouter(tags=["Knowledge Bases"], prefix="/knowledge_bases")
|
||||
|
||||
|
||||
settings = get_settings_service().settings
|
||||
knowledge_directory = settings.knowledge_bases_dir
|
||||
if not knowledge_directory:
|
||||
msg = "Knowledge bases directory is not set in the settings."
|
||||
raise ValueError(msg)
|
||||
KNOWLEDGE_BASES_DIR = Path(knowledge_directory).expanduser()
|
||||
|
||||
|
||||
class KnowledgeBaseInfo(BaseModel):
|
||||
id: str
|
||||
name: str
|
||||
embedding_provider: str | None = "Unknown"
|
||||
embedding_model: str | None = "Unknown"
|
||||
size: int = 0
|
||||
words: int = 0
|
||||
characters: int = 0
|
||||
chunks: int = 0
|
||||
avg_chunk_size: float = 0.0
|
||||
|
||||
|
||||
class BulkDeleteRequest(BaseModel):
|
||||
kb_names: list[str]
|
||||
|
||||
|
||||
def get_kb_root_path() -> Path:
|
||||
"""Get the knowledge bases root path."""
|
||||
return KNOWLEDGE_BASES_DIR
|
||||
|
||||
|
||||
def get_directory_size(path: Path) -> int:
|
||||
"""Calculate the total size of all files in a directory."""
|
||||
total_size = 0
|
||||
try:
|
||||
for file_path in path.rglob("*"):
|
||||
if file_path.is_file():
|
||||
total_size += file_path.stat().st_size
|
||||
except (OSError, PermissionError):
|
||||
pass
|
||||
return total_size
|
||||
|
||||
|
||||
def detect_embedding_provider(kb_path: Path) -> str:
|
||||
"""Detect the embedding provider from config files and directory structure."""
|
||||
# Provider patterns to check for
|
||||
provider_patterns = {
|
||||
"OpenAI": ["openai", "text-embedding-ada", "text-embedding-3"],
|
||||
"HuggingFace": ["sentence-transformers", "huggingface", "bert-"],
|
||||
"Cohere": ["cohere", "embed-english", "embed-multilingual"],
|
||||
"Google": ["palm", "gecko", "google"],
|
||||
"Chroma": ["chroma"],
|
||||
}
|
||||
|
||||
# Check JSON config files for provider information
|
||||
for config_file in kb_path.glob("*.json"):
|
||||
try:
|
||||
with config_file.open("r", encoding="utf-8") as f:
|
||||
config_data = json.load(f)
|
||||
if not isinstance(config_data, dict):
|
||||
continue
|
||||
|
||||
config_str = json.dumps(config_data).lower()
|
||||
|
||||
# Check for explicit provider fields first
|
||||
provider_fields = ["embedding_provider", "provider", "embedding_model_provider"]
|
||||
for field in provider_fields:
|
||||
if field in config_data:
|
||||
provider_value = str(config_data[field]).lower()
|
||||
for provider, patterns in provider_patterns.items():
|
||||
if any(pattern in provider_value for pattern in patterns):
|
||||
return provider
|
||||
|
||||
# Check for model name patterns
|
||||
for provider, patterns in provider_patterns.items():
|
||||
if any(pattern in config_str for pattern in patterns):
|
||||
return provider
|
||||
|
||||
except (OSError, json.JSONDecodeError) as _:
|
||||
logger.exception("Error reading config file '%s'", config_file)
|
||||
continue
|
||||
|
||||
# Fallback to directory structure
|
||||
if (kb_path / "chroma").exists():
|
||||
return "Chroma"
|
||||
if (kb_path / "vectors.npy").exists():
|
||||
return "Local"
|
||||
|
||||
return "Unknown"
|
||||
|
||||
|
||||
def detect_embedding_model(kb_path: Path) -> str:
|
||||
"""Detect the embedding model from config files."""
|
||||
# First check the embedding metadata file (most accurate)
|
||||
metadata_file = kb_path / "embedding_metadata.json"
|
||||
if metadata_file.exists():
|
||||
try:
|
||||
with metadata_file.open("r", encoding="utf-8") as f:
|
||||
metadata = json.load(f)
|
||||
if isinstance(metadata, dict) and "embedding_model" in metadata:
|
||||
# Check for embedding model field
|
||||
model_value = str(metadata.get("embedding_model", "unknown"))
|
||||
if model_value and model_value.lower() != "unknown":
|
||||
return model_value
|
||||
except (OSError, json.JSONDecodeError) as _:
|
||||
logger.exception("Error reading embedding metadata file '%s'", metadata_file)
|
||||
|
||||
# Check other JSON config files for model information
|
||||
for config_file in kb_path.glob("*.json"):
|
||||
# Skip the embedding metadata file since we already checked it
|
||||
if config_file.name == "embedding_metadata.json":
|
||||
continue
|
||||
|
||||
try:
|
||||
with config_file.open("r", encoding="utf-8") as f:
|
||||
config_data = json.load(f)
|
||||
if not isinstance(config_data, dict):
|
||||
continue
|
||||
|
||||
# Check for explicit model fields first and return the actual model name
|
||||
model_fields = ["embedding_model", "model", "embedding_model_name", "model_name"]
|
||||
for field in model_fields:
|
||||
if field in config_data:
|
||||
model_value = str(config_data[field])
|
||||
if model_value and model_value.lower() != "unknown":
|
||||
return model_value
|
||||
|
||||
# Check for OpenAI specific model names
|
||||
if "openai" in json.dumps(config_data).lower():
|
||||
openai_models = ["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]
|
||||
config_str = json.dumps(config_data).lower()
|
||||
for model in openai_models:
|
||||
if model in config_str:
|
||||
return model
|
||||
|
||||
# Check for HuggingFace model names (usually in model field)
|
||||
if "model" in config_data:
|
||||
model_name = str(config_data["model"])
|
||||
# Common HuggingFace embedding models
|
||||
hf_patterns = ["sentence-transformers", "all-MiniLM", "all-mpnet", "multi-qa"]
|
||||
if any(pattern in model_name for pattern in hf_patterns):
|
||||
return model_name
|
||||
|
||||
except (OSError, json.JSONDecodeError) as _:
|
||||
logger.exception("Error reading config file '%s'", config_file)
|
||||
continue
|
||||
|
||||
return "Unknown"
|
||||
|
||||
|
||||
def get_text_columns(df: pd.DataFrame, schema_data: list | None = None) -> list[str]:
|
||||
"""Get the text columns to analyze for word/character counts."""
|
||||
# First try schema-defined text columns
|
||||
if schema_data:
|
||||
text_columns = [
|
||||
col["column_name"]
|
||||
for col in schema_data
|
||||
if col.get("vectorize", False) and col.get("data_type") == "string"
|
||||
]
|
||||
if text_columns:
|
||||
return [col for col in text_columns if col in df.columns]
|
||||
|
||||
# Fallback to common text column names
|
||||
common_names = ["text", "content", "document", "chunk"]
|
||||
text_columns = [col for col in df.columns if col.lower() in common_names]
|
||||
if text_columns:
|
||||
return text_columns
|
||||
|
||||
# Last resort: all string columns
|
||||
return [col for col in df.columns if df[col].dtype == "object"]
|
||||
|
||||
|
||||
def calculate_text_metrics(df: pd.DataFrame, text_columns: list[str]) -> tuple[int, int]:
|
||||
"""Calculate total words and characters from text columns."""
|
||||
total_words = 0
|
||||
total_characters = 0
|
||||
|
||||
for col in text_columns:
|
||||
if col not in df.columns:
|
||||
continue
|
||||
|
||||
text_series = df[col].astype(str).fillna("")
|
||||
total_characters += text_series.str.len().sum()
|
||||
total_words += text_series.str.split().str.len().sum()
|
||||
|
||||
return int(total_words), int(total_characters)
|
||||
|
||||
|
||||
def get_kb_metadata(kb_path: Path) -> dict:
|
||||
"""Extract metadata from a knowledge base directory."""
|
||||
metadata: dict[str, float | int | str] = {
|
||||
"chunks": 0,
|
||||
"words": 0,
|
||||
"characters": 0,
|
||||
"avg_chunk_size": 0.0,
|
||||
"embedding_provider": "Unknown",
|
||||
"embedding_model": "Unknown",
|
||||
}
|
||||
|
||||
try:
|
||||
# First check embedding metadata file for accurate provider and model info
|
||||
metadata_file = kb_path / "embedding_metadata.json"
|
||||
if metadata_file.exists():
|
||||
try:
|
||||
with metadata_file.open("r", encoding="utf-8") as f:
|
||||
embedding_metadata = json.load(f)
|
||||
if isinstance(embedding_metadata, dict):
|
||||
if "embedding_provider" in embedding_metadata:
|
||||
metadata["embedding_provider"] = embedding_metadata["embedding_provider"]
|
||||
if "embedding_model" in embedding_metadata:
|
||||
metadata["embedding_model"] = embedding_metadata["embedding_model"]
|
||||
except (OSError, json.JSONDecodeError) as _:
|
||||
logger.exception("Error reading embedding metadata file '%s'", metadata_file)
|
||||
|
||||
# Fallback to detection if not found in metadata file
|
||||
if metadata["embedding_provider"] == "Unknown":
|
||||
metadata["embedding_provider"] = detect_embedding_provider(kb_path)
|
||||
if metadata["embedding_model"] == "Unknown":
|
||||
metadata["embedding_model"] = detect_embedding_model(kb_path)
|
||||
|
||||
# Read schema for text column information
|
||||
schema_data = None
|
||||
schema_file = kb_path / "schema.json"
|
||||
if schema_file.exists():
|
||||
try:
|
||||
with schema_file.open("r", encoding="utf-8") as f:
|
||||
schema_data = json.load(f)
|
||||
if not isinstance(schema_data, list):
|
||||
schema_data = None
|
||||
except (ValueError, TypeError, OSError) as _:
|
||||
logger.exception("Error reading schema file '%s'", schema_file)
|
||||
|
||||
# Create vector store
|
||||
chroma = Chroma(
|
||||
persist_directory=str(kb_path),
|
||||
collection_name=kb_path.name,
|
||||
)
|
||||
|
||||
# Access the raw collection
|
||||
collection = chroma._collection
|
||||
|
||||
# Fetch all documents and metadata
|
||||
results = collection.get(include=["documents", "metadatas"])
|
||||
|
||||
# Convert to pandas DataFrame
|
||||
source_chunks = pd.DataFrame(
|
||||
{
|
||||
"document": results["documents"],
|
||||
"metadata": results["metadatas"],
|
||||
}
|
||||
)
|
||||
|
||||
# Process the source data for metadata
|
||||
try:
|
||||
metadata["chunks"] = len(source_chunks)
|
||||
|
||||
# Get text columns and calculate metrics
|
||||
text_columns = get_text_columns(source_chunks, schema_data)
|
||||
if text_columns:
|
||||
words, characters = calculate_text_metrics(source_chunks, text_columns)
|
||||
metadata["words"] = words
|
||||
metadata["characters"] = characters
|
||||
|
||||
# Calculate average chunk size
|
||||
if int(metadata["chunks"]) > 0:
|
||||
metadata["avg_chunk_size"] = round(int(characters) / int(metadata["chunks"]), 1)
|
||||
|
||||
except (OSError, ValueError, TypeError) as _:
|
||||
logger.exception("Error processing Chroma DB '%s'", kb_path.name)
|
||||
|
||||
except (OSError, ValueError, TypeError) as _:
|
||||
logger.exception("Error processing knowledge base directory '%s'", kb_path)
|
||||
|
||||
return metadata
|
||||
|
||||
|
||||
@router.get("", status_code=HTTPStatus.OK)
|
||||
@router.get("/", status_code=HTTPStatus.OK)
|
||||
async def list_knowledge_bases(current_user: CurrentActiveUser) -> list[KnowledgeBaseInfo]:
|
||||
"""List all available knowledge bases."""
|
||||
try:
|
||||
kb_root_path = get_kb_root_path()
|
||||
kb_user = current_user.username
|
||||
kb_path = kb_root_path / kb_user
|
||||
|
||||
if not kb_path.exists():
|
||||
return []
|
||||
|
||||
knowledge_bases = []
|
||||
|
||||
for kb_dir in kb_path.iterdir():
|
||||
if not kb_dir.is_dir() or kb_dir.name.startswith("."):
|
||||
continue
|
||||
|
||||
try:
|
||||
# Get size of the directory
|
||||
size = get_directory_size(kb_dir)
|
||||
|
||||
# Get metadata from KB files
|
||||
metadata = get_kb_metadata(kb_dir)
|
||||
|
||||
kb_info = KnowledgeBaseInfo(
|
||||
id=kb_dir.name,
|
||||
name=kb_dir.name.replace("_", " ").replace("-", " ").title(),
|
||||
embedding_provider=metadata["embedding_provider"],
|
||||
embedding_model=metadata["embedding_model"],
|
||||
size=size,
|
||||
words=metadata["words"],
|
||||
characters=metadata["characters"],
|
||||
chunks=metadata["chunks"],
|
||||
avg_chunk_size=metadata["avg_chunk_size"],
|
||||
)
|
||||
|
||||
knowledge_bases.append(kb_info)
|
||||
|
||||
except OSError as _:
|
||||
# Log the exception and skip directories that can't be read
|
||||
await logger.aexception("Error reading knowledge base directory '%s'", kb_dir)
|
||||
continue
|
||||
|
||||
# Sort by name alphabetically
|
||||
knowledge_bases.sort(key=lambda x: x.name)
|
||||
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Error listing knowledge bases: {e!s}") from e
|
||||
else:
|
||||
return knowledge_bases
|
||||
|
||||
|
||||
@router.get("/{kb_name}", status_code=HTTPStatus.OK)
|
||||
async def get_knowledge_base(kb_name: str, current_user: CurrentActiveUser) -> KnowledgeBaseInfo:
|
||||
"""Get detailed information about a specific knowledge base."""
|
||||
try:
|
||||
kb_root_path = get_kb_root_path()
|
||||
kb_user = current_user.username
|
||||
kb_path = kb_root_path / kb_user / kb_name
|
||||
|
||||
if not kb_path.exists() or not kb_path.is_dir():
|
||||
raise HTTPException(status_code=404, detail=f"Knowledge base '{kb_name}' not found")
|
||||
|
||||
# Get size of the directory
|
||||
size = get_directory_size(kb_path)
|
||||
|
||||
# Get metadata from KB files
|
||||
metadata = get_kb_metadata(kb_path)
|
||||
|
||||
return KnowledgeBaseInfo(
|
||||
id=kb_name,
|
||||
name=kb_name.replace("_", " ").replace("-", " ").title(),
|
||||
embedding_provider=metadata["embedding_provider"],
|
||||
embedding_model=metadata["embedding_model"],
|
||||
size=size,
|
||||
words=metadata["words"],
|
||||
characters=metadata["characters"],
|
||||
chunks=metadata["chunks"],
|
||||
avg_chunk_size=metadata["avg_chunk_size"],
|
||||
)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Error getting knowledge base '{kb_name}': {e!s}") from e
|
||||
|
||||
|
||||
@router.delete("/{kb_name}", status_code=HTTPStatus.OK)
|
||||
async def delete_knowledge_base(kb_name: str, current_user: CurrentActiveUser) -> dict[str, str]:
|
||||
"""Delete a specific knowledge base."""
|
||||
try:
|
||||
kb_root_path = get_kb_root_path()
|
||||
kb_user = current_user.username
|
||||
kb_path = kb_root_path / kb_user / kb_name
|
||||
|
||||
if not kb_path.exists() or not kb_path.is_dir():
|
||||
raise HTTPException(status_code=404, detail=f"Knowledge base '{kb_name}' not found")
|
||||
|
||||
# Delete the entire knowledge base directory
|
||||
shutil.rmtree(kb_path)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Error deleting knowledge base '{kb_name}': {e!s}") from e
|
||||
else:
|
||||
return {"message": f"Knowledge base '{kb_name}' deleted successfully"}
|
||||
|
||||
|
||||
@router.delete("", status_code=HTTPStatus.OK)
|
||||
@router.delete("/", status_code=HTTPStatus.OK)
|
||||
async def delete_knowledge_bases_bulk(request: BulkDeleteRequest, current_user: CurrentActiveUser) -> dict[str, object]:
|
||||
"""Delete multiple knowledge bases."""
|
||||
try:
|
||||
kb_root_path = get_kb_root_path()
|
||||
kb_user = current_user.username
|
||||
kb_user_path = kb_root_path / kb_user
|
||||
deleted_count = 0
|
||||
not_found_kbs = []
|
||||
|
||||
for kb_name in request.kb_names:
|
||||
kb_path = kb_user_path / kb_name
|
||||
|
||||
if not kb_path.exists() or not kb_path.is_dir():
|
||||
not_found_kbs.append(kb_name)
|
||||
continue
|
||||
|
||||
try:
|
||||
# Delete the entire knowledge base directory
|
||||
shutil.rmtree(kb_path)
|
||||
deleted_count += 1
|
||||
except (OSError, PermissionError) as e:
|
||||
await logger.aexception("Error deleting knowledge base '%s': %s", kb_name, e)
|
||||
# Continue with other deletions even if one fails
|
||||
|
||||
if not_found_kbs and deleted_count == 0:
|
||||
raise HTTPException(status_code=404, detail=f"Knowledge bases not found: {', '.join(not_found_kbs)}")
|
||||
|
||||
result = {
|
||||
"message": f"Successfully deleted {deleted_count} knowledge base(s)",
|
||||
"deleted_count": deleted_count,
|
||||
}
|
||||
|
||||
if not_found_kbs:
|
||||
result["not_found"] = ", ".join(not_found_kbs)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Error deleting knowledge bases: {e!s}") from e
|
||||
else:
|
||||
return result
|
||||
|
|
@ -4,7 +4,6 @@ import pydantic
|
|||
from anyio import BrokenResourceError
|
||||
from fastapi import APIRouter, HTTPException, Request, Response
|
||||
from fastapi.responses import HTMLResponse, StreamingResponse
|
||||
from loguru import logger
|
||||
from mcp import types
|
||||
from mcp.server import NotificationOptions, Server
|
||||
from mcp.server.sse import SseServerTransport
|
||||
|
|
@ -18,6 +17,7 @@ from langflow.api.v1.mcp_utils import (
|
|||
handle_mcp_errors,
|
||||
handle_read_resource,
|
||||
)
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.services.deps import get_settings_service
|
||||
|
||||
router = APIRouter(prefix="/mcp", tags=["mcp"])
|
||||
|
|
@ -83,22 +83,22 @@ async def im_alive():
|
|||
@router.get("/sse", response_class=StreamingResponse)
|
||||
async def handle_sse(request: Request, current_user: CurrentActiveMCPUser):
|
||||
msg = f"Starting SSE connection, server name: {server.name}"
|
||||
logger.info(msg)
|
||||
await logger.ainfo(msg)
|
||||
token = current_user_ctx.set(current_user)
|
||||
try:
|
||||
async with sse.connect_sse(request.scope, request.receive, request._send) as streams:
|
||||
try:
|
||||
msg = "Starting SSE connection"
|
||||
logger.debug(msg)
|
||||
await logger.adebug(msg)
|
||||
msg = f"Stream types: read={type(streams[0])}, write={type(streams[1])}"
|
||||
logger.debug(msg)
|
||||
await logger.adebug(msg)
|
||||
|
||||
notification_options = NotificationOptions(
|
||||
prompts_changed=True, resources_changed=True, tools_changed=True
|
||||
)
|
||||
init_options = server.create_initialization_options(notification_options)
|
||||
msg = f"Initialization options: {init_options}"
|
||||
logger.debug(msg)
|
||||
await logger.adebug(msg)
|
||||
|
||||
try:
|
||||
await server.run(streams[0], streams[1], init_options)
|
||||
|
|
@ -106,20 +106,20 @@ async def handle_sse(request: Request, current_user: CurrentActiveMCPUser):
|
|||
validation_error = find_validation_error(exc)
|
||||
if validation_error:
|
||||
msg = "Validation error in MCP:" + str(validation_error)
|
||||
logger.debug(msg)
|
||||
await logger.adebug(msg)
|
||||
else:
|
||||
msg = f"Error in MCP: {exc!s}"
|
||||
logger.debug(msg)
|
||||
await logger.adebug(msg)
|
||||
return
|
||||
except BrokenResourceError:
|
||||
# Handle gracefully when client disconnects
|
||||
logger.info("Client disconnected from SSE connection")
|
||||
await logger.ainfo("Client disconnected from SSE connection")
|
||||
except asyncio.CancelledError:
|
||||
logger.info("SSE connection was cancelled")
|
||||
await logger.ainfo("SSE connection was cancelled")
|
||||
raise
|
||||
except Exception as e:
|
||||
msg = f"Error in MCP: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise
|
||||
finally:
|
||||
current_user_ctx.reset(token)
|
||||
|
|
@ -130,8 +130,8 @@ async def handle_messages(request: Request):
|
|||
try:
|
||||
await sse.handle_post_message(request.scope, request.receive, request._send)
|
||||
except (BrokenResourceError, BrokenPipeError) as e:
|
||||
logger.info("MCP Server disconnected")
|
||||
await logger.ainfo("MCP Server disconnected")
|
||||
raise HTTPException(status_code=404, detail=f"MCP Server disconnected, error: {e}") from e
|
||||
except Exception as e:
|
||||
logger.error(f"Internal server error: {e}")
|
||||
await logger.aerror(f"Internal server error: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Internal server error: {e}") from e
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
from asyncio.subprocess import create_subprocess_exec
|
||||
|
|
@ -9,10 +8,11 @@ from datetime import datetime, timezone
|
|||
from ipaddress import ip_address
|
||||
from pathlib import Path
|
||||
from subprocess import CalledProcessError
|
||||
from typing import Annotated, Any
|
||||
from uuid import UUID
|
||||
|
||||
from anyio import BrokenResourceError
|
||||
from fastapi import APIRouter, HTTPException, Request, Response
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request, Response
|
||||
from fastapi.responses import HTMLResponse
|
||||
from mcp import types
|
||||
from mcp.server import NotificationOptions, Server
|
||||
|
|
@ -30,6 +30,7 @@ from langflow.api.v1.mcp_utils import (
|
|||
handle_read_resource,
|
||||
)
|
||||
from langflow.api.v1.schemas import (
|
||||
AuthSettings,
|
||||
MCPInstallRequest,
|
||||
MCPProjectResponse,
|
||||
MCPProjectUpdateRequest,
|
||||
|
|
@ -37,14 +38,113 @@ from langflow.api.v1.schemas import (
|
|||
)
|
||||
from langflow.base.mcp.constants import MAX_MCP_SERVER_NAME_LENGTH
|
||||
from langflow.base.mcp.util import sanitize_mcp_name
|
||||
from langflow.logging import logger
|
||||
from langflow.services.auth.mcp_encryption import decrypt_auth_settings, encrypt_auth_settings
|
||||
from langflow.services.database.models import Flow, Folder
|
||||
from langflow.services.database.models.api_key.crud import check_key, create_api_key
|
||||
from langflow.services.database.models.api_key.model import ApiKeyCreate
|
||||
from langflow.services.database.models.user.model import User
|
||||
from langflow.services.deps import get_settings_service, session_scope
|
||||
from langflow.services.settings.feature_flags import FEATURE_FLAGS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/mcp/project", tags=["mcp_projects"])
|
||||
|
||||
|
||||
async def verify_project_auth(
|
||||
project_id: UUID,
|
||||
query_param: str | None = None,
|
||||
header_param: str | None = None,
|
||||
) -> User:
|
||||
"""Custom authentication for MCP project endpoints when API key is required.
|
||||
|
||||
This is only used when MCP composer is enabled and project requires API key auth.
|
||||
"""
|
||||
async with session_scope() as session:
|
||||
# First, get the project to check its auth settings
|
||||
project = (await session.exec(select(Folder).where(Folder.id == project_id))).first()
|
||||
|
||||
if not project:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
# For MCP composer enabled, only use API key
|
||||
api_key = query_param or header_param
|
||||
if not api_key:
|
||||
raise HTTPException(
|
||||
status_code=401,
|
||||
detail="API key required for this project. Provide x-api-key header or query parameter.",
|
||||
)
|
||||
|
||||
# Validate the API key
|
||||
user = await check_key(session, api_key)
|
||||
if not user:
|
||||
raise HTTPException(status_code=401, detail="Invalid API key")
|
||||
|
||||
# Verify user has access to the project
|
||||
project_access = (
|
||||
await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == user.id))
|
||||
).first()
|
||||
|
||||
if not project_access:
|
||||
raise HTTPException(status_code=403, detail="Access denied to this project")
|
||||
|
||||
return user
|
||||
|
||||
|
||||
# Smart authentication dependency that chooses method based on project settings
|
||||
async def verify_project_auth_conditional(
|
||||
project_id: UUID,
|
||||
request: Request,
|
||||
) -> User:
|
||||
"""Choose authentication method based on project settings.
|
||||
|
||||
- MCP Composer enabled + API key auth: Only allow API keys
|
||||
- All other cases: Use standard MCP auth (JWT + API keys)
|
||||
"""
|
||||
async with session_scope() as session:
|
||||
# Get project to check auth settings
|
||||
project = (await session.exec(select(Folder).where(Folder.id == project_id))).first()
|
||||
|
||||
if not project:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
# Check if this project requires API key only authentication
|
||||
if FEATURE_FLAGS.mcp_composer and project.auth_settings:
|
||||
auth_settings = AuthSettings(**project.auth_settings)
|
||||
if auth_settings.auth_type == "apikey":
|
||||
# For MCP composer projects with API key auth, use custom API key validation
|
||||
api_key_header_value = request.headers.get("x-api-key")
|
||||
api_key_query_value = request.query_params.get("x-api-key")
|
||||
return await verify_project_auth(project_id, api_key_query_value, api_key_header_value)
|
||||
|
||||
# For all other cases, use standard MCP authentication (allows JWT + API keys)
|
||||
# Extract token
|
||||
token: str | None = None
|
||||
auth_header = request.headers.get("authorization")
|
||||
if auth_header and auth_header.startswith("Bearer "):
|
||||
token = auth_header[7:]
|
||||
|
||||
# Extract API keys
|
||||
api_key_query_value = request.query_params.get("x-api-key")
|
||||
api_key_header_value = request.headers.get("x-api-key")
|
||||
|
||||
# Call the MCP auth function directly
|
||||
from langflow.services.auth.utils import get_current_user_mcp
|
||||
|
||||
user = await get_current_user_mcp(
|
||||
token=token or "", query_param=api_key_query_value, header_param=api_key_header_value, db=session
|
||||
)
|
||||
|
||||
# Verify project access
|
||||
project_access = (
|
||||
await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == user.id))
|
||||
).first()
|
||||
|
||||
if not project_access:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
return user
|
||||
|
||||
|
||||
# Create project-specific context variable
|
||||
current_project_ctx: ContextVar[UUID | None] = ContextVar("current_project_ctx", default=None)
|
||||
|
||||
|
|
@ -106,7 +206,7 @@ async def list_project_tools(
|
|||
)
|
||||
try:
|
||||
tool = MCPSettings(
|
||||
id=str(flow.id),
|
||||
id=flow.id,
|
||||
action_name=name,
|
||||
action_description=description,
|
||||
mcp_enabled=flow.mcp_enabled,
|
||||
|
|
@ -117,26 +217,28 @@ async def list_project_tools(
|
|||
tools.append(tool)
|
||||
except Exception as e: # noqa: BLE001
|
||||
msg = f"Error in listing project tools: {e!s} from flow: {name}"
|
||||
logger.warning(msg)
|
||||
await logger.awarning(msg)
|
||||
continue
|
||||
|
||||
# Get project-level auth settings
|
||||
# Get project-level auth settings and decrypt sensitive fields
|
||||
auth_settings = None
|
||||
if project.auth_settings:
|
||||
from langflow.api.v1.schemas import AuthSettings
|
||||
|
||||
auth_settings = AuthSettings(**project.auth_settings)
|
||||
# Decrypt sensitive fields before returning
|
||||
decrypted_settings = decrypt_auth_settings(project.auth_settings)
|
||||
auth_settings = AuthSettings(**decrypted_settings) if decrypted_settings else None
|
||||
|
||||
except Exception as e:
|
||||
msg = f"Error listing project tools: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
return MCPProjectResponse(tools=tools, auth_settings=auth_settings)
|
||||
|
||||
|
||||
@router.head("/{project_id}/sse", response_class=HTMLResponse, include_in_schema=False)
|
||||
async def im_alive():
|
||||
async def im_alive(project_id: str): # noqa: ARG001
|
||||
return Response()
|
||||
|
||||
|
||||
|
|
@ -144,22 +246,13 @@ async def im_alive():
|
|||
async def handle_project_sse(
|
||||
project_id: UUID,
|
||||
request: Request,
|
||||
current_user: CurrentActiveMCPUser,
|
||||
current_user: Annotated[User, Depends(verify_project_auth_conditional)],
|
||||
):
|
||||
"""Handle SSE connections for a specific project."""
|
||||
# Verify project exists and user has access
|
||||
async with session_scope() as session:
|
||||
project = (
|
||||
await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == current_user.id))
|
||||
).first()
|
||||
|
||||
if not project:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
# Get project-specific SSE transport and MCP server
|
||||
sse = get_project_sse(project_id)
|
||||
project_server = get_project_mcp_server(project_id)
|
||||
logger.debug("Project MCP server name: %s", project_server.server.name)
|
||||
await logger.adebug("Project MCP server name: %s", project_server.server.name)
|
||||
|
||||
# Set context variables
|
||||
user_token = current_user_ctx.set(current_user)
|
||||
|
|
@ -168,7 +261,7 @@ async def handle_project_sse(
|
|||
try:
|
||||
async with sse.connect_sse(request.scope, request.receive, request._send) as streams:
|
||||
try:
|
||||
logger.debug("Starting SSE connection for project %s", project_id)
|
||||
await logger.adebug("Starting SSE connection for project %s", project_id)
|
||||
|
||||
notification_options = NotificationOptions(
|
||||
prompts_changed=True, resources_changed=True, tools_changed=True
|
||||
|
|
@ -177,15 +270,15 @@ async def handle_project_sse(
|
|||
|
||||
try:
|
||||
await project_server.server.run(streams[0], streams[1], init_options)
|
||||
except Exception:
|
||||
logger.exception("Error in project MCP")
|
||||
except Exception: # noqa: BLE001
|
||||
await logger.aexception("Error in project MCP")
|
||||
except BrokenResourceError:
|
||||
logger.info("Client disconnected from project SSE connection")
|
||||
await logger.ainfo("Client disconnected from project SSE connection")
|
||||
except asyncio.CancelledError:
|
||||
logger.info("Project SSE connection was cancelled")
|
||||
await logger.ainfo("Project SSE connection was cancelled")
|
||||
raise
|
||||
except Exception:
|
||||
logger.exception("Error in project MCP")
|
||||
await logger.aexception("Error in project MCP")
|
||||
raise
|
||||
finally:
|
||||
current_user_ctx.reset(user_token)
|
||||
|
|
@ -195,17 +288,12 @@ async def handle_project_sse(
|
|||
|
||||
|
||||
@router.post("/{project_id}")
|
||||
async def handle_project_messages(project_id: UUID, request: Request, current_user: CurrentActiveMCPUser):
|
||||
async def handle_project_messages(
|
||||
project_id: UUID,
|
||||
request: Request,
|
||||
current_user: Annotated[User, Depends(verify_project_auth_conditional)],
|
||||
):
|
||||
"""Handle POST messages for a project-specific MCP server."""
|
||||
# Verify project exists and user has access
|
||||
async with session_scope() as session:
|
||||
project = (
|
||||
await session.exec(select(Folder).where(Folder.id == project_id, Folder.user_id == current_user.id))
|
||||
).first()
|
||||
|
||||
if not project:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
# Set context variables
|
||||
user_token = current_user_ctx.set(current_user)
|
||||
project_token = current_project_ctx.set(project_id)
|
||||
|
|
@ -214,7 +302,7 @@ async def handle_project_messages(project_id: UUID, request: Request, current_us
|
|||
sse = get_project_sse(project_id)
|
||||
await sse.handle_post_message(request.scope, request.receive, request._send)
|
||||
except BrokenResourceError as e:
|
||||
logger.info("Project MCP Server disconnected for project %s", project_id)
|
||||
await logger.ainfo("Project MCP Server disconnected for project %s", project_id)
|
||||
raise HTTPException(status_code=404, detail=f"Project MCP Server disconnected, error: {e}") from e
|
||||
finally:
|
||||
current_user_ctx.reset(user_token)
|
||||
|
|
@ -222,7 +310,11 @@ async def handle_project_messages(project_id: UUID, request: Request, current_us
|
|||
|
||||
|
||||
@router.post("/{project_id}/")
|
||||
async def handle_project_messages_with_slash(project_id: UUID, request: Request, current_user: CurrentActiveMCPUser):
|
||||
async def handle_project_messages_with_slash(
|
||||
project_id: UUID,
|
||||
request: Request,
|
||||
current_user: Annotated[User, Depends(verify_project_auth_conditional)],
|
||||
):
|
||||
"""Handle POST messages for a project-specific MCP server with trailing slash."""
|
||||
# Call the original handler
|
||||
return await handle_project_messages(project_id, request, current_user)
|
||||
|
|
@ -249,11 +341,33 @@ async def update_project_mcp_settings(
|
|||
if not project:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
# Update project-level auth settings
|
||||
if request.auth_settings:
|
||||
project.auth_settings = request.auth_settings.model_dump(mode="json")
|
||||
else:
|
||||
project.auth_settings = None
|
||||
# Update project-level auth settings with encryption
|
||||
if "auth_settings" in request.model_fields_set:
|
||||
if request.auth_settings is None:
|
||||
# Explicitly set to None - clear auth settings
|
||||
project.auth_settings = None
|
||||
else:
|
||||
# Use python mode to get raw values without SecretStr masking
|
||||
auth_model = request.auth_settings
|
||||
auth_dict = auth_model.model_dump(mode="python", exclude_none=True)
|
||||
|
||||
# Extract actual secret values before encryption
|
||||
from pydantic import SecretStr
|
||||
|
||||
# Handle api_key if it's a SecretStr
|
||||
api_key_val = getattr(auth_model, "api_key", None)
|
||||
if isinstance(api_key_val, SecretStr):
|
||||
auth_dict["api_key"] = api_key_val.get_secret_value()
|
||||
|
||||
# Handle oauth_client_secret if it's a SecretStr
|
||||
client_secret_val = getattr(auth_model, "oauth_client_secret", None)
|
||||
if isinstance(client_secret_val, SecretStr):
|
||||
auth_dict["oauth_client_secret"] = client_secret_val.get_secret_value()
|
||||
|
||||
# Encrypt and store
|
||||
encrypted_settings = encrypt_auth_settings(auth_dict)
|
||||
project.auth_settings = encrypted_settings
|
||||
|
||||
session.add(project)
|
||||
|
||||
# Query flows in the project
|
||||
|
|
@ -280,7 +394,7 @@ async def update_project_mcp_settings(
|
|||
|
||||
except Exception as e:
|
||||
msg = f"Error updating project MCP settings: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
|
|
@ -348,6 +462,7 @@ async def install_mcp_config(
|
|||
if not is_local_ip(client_ip):
|
||||
raise HTTPException(status_code=500, detail="MCP configuration can only be installed from a local connection")
|
||||
|
||||
removed_servers: list[str] = [] # Track removed servers for reinstallation
|
||||
try:
|
||||
# Verify project exists and user has access
|
||||
async with session_scope() as session:
|
||||
|
|
@ -358,6 +473,28 @@ async def install_mcp_config(
|
|||
if not project:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
# Check if project requires API key authentication and generate if needed
|
||||
generated_api_key = None
|
||||
|
||||
# Determine if we need to generate an API key based on feature flag
|
||||
should_generate_api_key = False
|
||||
if not FEATURE_FLAGS.mcp_composer:
|
||||
# When MCP_COMPOSER is disabled, only generate API key if autologin is disabled
|
||||
# (matches frontend !isAutoLogin check)
|
||||
settings_service = get_settings_service()
|
||||
should_generate_api_key = not settings_service.auth_settings.AUTO_LOGIN
|
||||
elif project.auth_settings:
|
||||
# When MCP_COMPOSER is enabled, only generate if auth_type is "apikey"
|
||||
auth_settings = AuthSettings(**project.auth_settings) if project.auth_settings else AuthSettings()
|
||||
should_generate_api_key = auth_settings.auth_type == "apikey"
|
||||
|
||||
if should_generate_api_key:
|
||||
# Generate API key with specific name format
|
||||
api_key_name = f"MCP Project {project.name} - {body.client}"
|
||||
api_key_create = ApiKeyCreate(name=api_key_name)
|
||||
unmasked_api_key = await create_api_key(session, api_key_create, current_user.id)
|
||||
generated_api_key = unmasked_api_key.api_key
|
||||
|
||||
# Get settings service to build the SSE URL
|
||||
settings_service = get_settings_service()
|
||||
host = getattr(settings_service.settings, "host", "localhost")
|
||||
|
|
@ -368,13 +505,12 @@ async def install_mcp_config(
|
|||
# Determine command and args based on operating system
|
||||
os_type = platform.system()
|
||||
command = "uvx"
|
||||
mcp_tool = "mcp-composer" if FEATURE_FLAGS.mcp_composer else "mcp-proxy"
|
||||
|
||||
# Check if running on WSL (will appear as Linux but with Microsoft in release info)
|
||||
is_wsl = os_type == "Linux" and "microsoft" in platform.uname().release.lower()
|
||||
|
||||
if is_wsl:
|
||||
logger.debug("WSL detected, using Windows-specific configuration")
|
||||
await logger.adebug("WSL detected, using Windows-specific configuration")
|
||||
|
||||
# If we're in WSL and the host is localhost, we might need to adjust the URL
|
||||
# so Windows applications can reach the WSL service
|
||||
|
|
@ -389,66 +525,62 @@ async def install_mcp_config(
|
|||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.PIPE,
|
||||
)
|
||||
stdout, stderr = await proc.communicate()
|
||||
stdout, _ = await proc.communicate()
|
||||
|
||||
if proc.returncode == 0 and stdout.strip():
|
||||
wsl_ip = stdout.decode().strip().split()[0] # Get first IP address
|
||||
logger.debug("Using WSL IP for external access: %s", wsl_ip)
|
||||
await logger.adebug("Using WSL IP for external access: %s", wsl_ip)
|
||||
# Replace the localhost with the WSL IP in the URL
|
||||
sse_url = sse_url.replace(f"http://{host}:{port}", f"http://{wsl_ip}:{port}")
|
||||
except OSError as e:
|
||||
logger.warning("Failed to get WSL IP address: %s. Using default URL.", str(e))
|
||||
await logger.awarning("Failed to get WSL IP address: %s. Using default URL.", str(e))
|
||||
|
||||
# Configure args based on the MCP tool
|
||||
oauth_env = None
|
||||
# Base args
|
||||
args = ["mcp-composer"] if FEATURE_FLAGS.mcp_composer else ["mcp-proxy"]
|
||||
|
||||
# Add authentication args based on MCP_COMPOSER feature flag and auth settings
|
||||
if not FEATURE_FLAGS.mcp_composer:
|
||||
# When MCP_COMPOSER is disabled, only use headers format if API key was generated
|
||||
# (when autologin is disabled)
|
||||
if generated_api_key:
|
||||
args.extend(["--headers", "x-api-key", generated_api_key])
|
||||
elif project.auth_settings:
|
||||
# Decrypt sensitive fields before using them
|
||||
decrypted_settings = decrypt_auth_settings(project.auth_settings)
|
||||
auth_settings = AuthSettings(**decrypted_settings) if decrypted_settings else AuthSettings()
|
||||
args.extend(["--auth_type", auth_settings.auth_type])
|
||||
|
||||
# When MCP_COMPOSER is enabled, only add headers if auth_type is "apikey"
|
||||
auth_settings = AuthSettings(**project.auth_settings)
|
||||
if auth_settings.auth_type == "apikey" and generated_api_key:
|
||||
args.extend(["--headers", "x-api-key", generated_api_key])
|
||||
# If no auth_settings or auth_type is "none", don't add any auth headers
|
||||
|
||||
# Add the SSE URL
|
||||
if FEATURE_FLAGS.mcp_composer:
|
||||
args = [mcp_tool, "--sse-url", sse_url]
|
||||
|
||||
# Check for auth settings and add auth parameters
|
||||
if project.auth_settings:
|
||||
from langflow.api.v1.schemas import AuthSettings
|
||||
|
||||
auth_settings = AuthSettings(**project.auth_settings)
|
||||
args.extend(["--auth_type", auth_settings.auth_type])
|
||||
|
||||
oauth_env = {
|
||||
"OAUTH_HOST": auth_settings.oauth_host,
|
||||
"OAUTH_PORT": auth_settings.oauth_port,
|
||||
"OAUTH_SERVER_URL": auth_settings.oauth_server_url,
|
||||
"OAUTH_CALLBACK_PATH": auth_settings.oauth_callback_path,
|
||||
"OAUTH_CLIENT_ID": auth_settings.oauth_client_id,
|
||||
"OAUTH_CLIENT_SECRET": auth_settings.oauth_client_secret,
|
||||
"OAUTH_AUTH_URL": auth_settings.oauth_auth_url,
|
||||
"OAUTH_TOKEN_URL": auth_settings.oauth_token_url,
|
||||
"OAUTH_MCP_SCOPE": auth_settings.oauth_mcp_scope,
|
||||
"OAUTH_PROVIDER_SCOPE": auth_settings.oauth_provider_scope,
|
||||
}
|
||||
args.extend(["--sse-url", sse_url])
|
||||
else:
|
||||
args = [mcp_tool, sse_url]
|
||||
args.append(sse_url)
|
||||
|
||||
if os_type == "Windows":
|
||||
command = "cmd"
|
||||
args = ["/c", "uvx", *args]
|
||||
logger.debug("Windows detected, using cmd command")
|
||||
await logger.adebug("Windows detected, using cmd command")
|
||||
|
||||
name = project.name
|
||||
|
||||
# Create the MCP configuration
|
||||
server_config = {
|
||||
server_config: dict[str, Any] = {
|
||||
"command": command,
|
||||
"args": args,
|
||||
}
|
||||
|
||||
# Add environment variables if mcp-composer feature flag is enabled and auth settings exist
|
||||
if FEATURE_FLAGS.mcp_composer and oauth_env is not None:
|
||||
server_config["env"] = oauth_env # type: ignore[assignment]
|
||||
|
||||
mcp_config = {
|
||||
"mcpServers": {f"lf-{sanitize_mcp_name(name)[: (MAX_MCP_SERVER_NAME_LENGTH - 4)]}": server_config}
|
||||
}
|
||||
|
||||
server_name = f"lf-{sanitize_mcp_name(name)[: (MAX_MCP_SERVER_NAME_LENGTH - 4)]}"
|
||||
logger.debug("Installing MCP config for project: %s (server name: %s)", project.name, server_name)
|
||||
await logger.adebug("Installing MCP config for project: %s (server name: %s)", project.name, server_name)
|
||||
|
||||
# Determine the config file path based on the client and OS
|
||||
if body.client.lower() == "cursor":
|
||||
|
|
@ -500,7 +632,7 @@ async def install_mcp_config(
|
|||
status_code=400, detail="Windows C: drive not mounted at /mnt/c in WSL"
|
||||
)
|
||||
except (OSError, CalledProcessError) as e:
|
||||
logger.warning("Failed to determine Windows user path in WSL: %s", str(e))
|
||||
await logger.awarning("Failed to determine Windows user path in WSL: %s", str(e))
|
||||
raise HTTPException(
|
||||
status_code=400, detail=f"Could not determine Windows Claude config path in WSL: {e!s}"
|
||||
) from e
|
||||
|
|
@ -525,9 +657,18 @@ async def install_mcp_config(
|
|||
# If file exists but is invalid JSON, start fresh
|
||||
existing_config = {"mcpServers": {}}
|
||||
|
||||
# Merge new config with existing config
|
||||
# Ensure mcpServers section exists
|
||||
if "mcpServers" not in existing_config:
|
||||
existing_config["mcpServers"] = {}
|
||||
|
||||
# Remove any existing servers with the same SSE URL (for reinstalling)
|
||||
project_sse_url = await get_project_sse_url(project_id)
|
||||
existing_config, removed_servers = remove_server_by_sse_url(existing_config, project_sse_url)
|
||||
|
||||
if removed_servers:
|
||||
logger.info("Removed existing MCP servers with same SSE URL for reinstall: %s", removed_servers)
|
||||
|
||||
# Merge new config with existing config
|
||||
existing_config["mcpServers"].update(mcp_config["mcpServers"])
|
||||
|
||||
# Write the updated config
|
||||
|
|
@ -536,11 +677,17 @@ async def install_mcp_config(
|
|||
|
||||
except Exception as e:
|
||||
msg = f"Error installing MCP configuration: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
else:
|
||||
message = f"Successfully installed MCP configuration for {body.client}"
|
||||
logger.info(message)
|
||||
action = "reinstalled" if removed_servers else "installed"
|
||||
message = f"Successfully {action} MCP configuration for {body.client}"
|
||||
if removed_servers:
|
||||
message += f" (replaced existing servers: {', '.join(removed_servers)})"
|
||||
if generated_api_key:
|
||||
auth_type = "API key" if FEATURE_FLAGS.mcp_composer else "legacy API key"
|
||||
message += f" with {auth_type} authentication (key name: 'MCP Project {project.name} - {body.client}')"
|
||||
await logger.ainfo(message)
|
||||
return {"message": message}
|
||||
|
||||
|
||||
|
|
@ -560,12 +707,11 @@ async def check_installed_mcp_servers(
|
|||
if not project:
|
||||
raise HTTPException(status_code=404, detail="Project not found")
|
||||
|
||||
# Project server name pattern (must match the logic in install function)
|
||||
name = project.name
|
||||
project_server_name = f"lf-{sanitize_mcp_name(name)[: (MAX_MCP_SERVER_NAME_LENGTH - 4)]}"
|
||||
# Generate the SSE URL for this project
|
||||
project_sse_url = await get_project_sse_url(project_id)
|
||||
|
||||
logger.debug(
|
||||
"Checking for installed MCP servers for project: %s (server name: %s)", project.name, project_server_name
|
||||
await logger.adebug(
|
||||
"Checking for installed MCP servers for project: %s (SSE URL: %s)", project.name, project_sse_url
|
||||
)
|
||||
|
||||
# Check configurations for different clients
|
||||
|
|
@ -573,43 +719,45 @@ async def check_installed_mcp_servers(
|
|||
|
||||
# Check Cursor configuration
|
||||
cursor_config_path = Path.home() / ".cursor" / "mcp.json"
|
||||
logger.debug("Checking Cursor config at: %s (exists: %s)", cursor_config_path, cursor_config_path.exists())
|
||||
await logger.adebug(
|
||||
"Checking Cursor config at: %s (exists: %s)", cursor_config_path, cursor_config_path.exists()
|
||||
)
|
||||
if cursor_config_path.exists():
|
||||
try:
|
||||
with cursor_config_path.open("r") as f:
|
||||
cursor_config = json.load(f)
|
||||
if "mcpServers" in cursor_config and project_server_name in cursor_config["mcpServers"]:
|
||||
logger.debug("Found Cursor config for project server: %s", project_server_name)
|
||||
if config_contains_sse_url(cursor_config, project_sse_url):
|
||||
await logger.adebug("Found Cursor config with matching SSE URL: %s", project_sse_url)
|
||||
results.append("cursor")
|
||||
else:
|
||||
logger.debug(
|
||||
"Cursor config exists but no entry for server: %s (available servers: %s)",
|
||||
project_server_name,
|
||||
await logger.adebug(
|
||||
"Cursor config exists but no server with SSE URL: %s (available servers: %s)",
|
||||
project_sse_url,
|
||||
list(cursor_config.get("mcpServers", {}).keys()),
|
||||
)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("Failed to parse Cursor config JSON at: %s", cursor_config_path)
|
||||
await logger.awarning("Failed to parse Cursor config JSON at: %s", cursor_config_path)
|
||||
|
||||
# Check Windsurf configuration
|
||||
windsurf_config_path = Path.home() / ".codeium" / "windsurf" / "mcp_config.json"
|
||||
logger.debug(
|
||||
await logger.adebug(
|
||||
"Checking Windsurf config at: %s (exists: %s)", windsurf_config_path, windsurf_config_path.exists()
|
||||
)
|
||||
if windsurf_config_path.exists():
|
||||
try:
|
||||
with windsurf_config_path.open("r") as f:
|
||||
windsurf_config = json.load(f)
|
||||
if "mcpServers" in windsurf_config and project_server_name in windsurf_config["mcpServers"]:
|
||||
logger.debug("Found Windsurf config for project server: %s", project_server_name)
|
||||
if config_contains_sse_url(windsurf_config, project_sse_url):
|
||||
await logger.adebug("Found Windsurf config with matching SSE URL: %s", project_sse_url)
|
||||
results.append("windsurf")
|
||||
else:
|
||||
logger.debug(
|
||||
"Windsurf config exists but no entry for server: %s (available servers: %s)",
|
||||
project_server_name,
|
||||
await logger.adebug(
|
||||
"Windsurf config exists but no server with SSE URL: %s (available servers: %s)",
|
||||
project_sse_url,
|
||||
list(windsurf_config.get("mcpServers", {}).keys()),
|
||||
)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("Failed to parse Windsurf config JSON at: %s", windsurf_config_path)
|
||||
await logger.awarning("Failed to parse Windsurf config JSON at: %s", windsurf_config_path)
|
||||
|
||||
# Check Claude configuration
|
||||
claude_config_path = None
|
||||
|
|
@ -654,7 +802,7 @@ async def check_installed_mcp_servers(
|
|||
user_dirs[0] / "AppData" / "Roaming" / "Claude" / "claude_desktop_config.json"
|
||||
)
|
||||
except (OSError, CalledProcessError) as e:
|
||||
logger.warning(
|
||||
await logger.awarning(
|
||||
"Failed to determine Windows user path in WSL for checking Claude config: %s", str(e)
|
||||
)
|
||||
# Don't set claude_config_path, so it will be skipped
|
||||
|
|
@ -663,31 +811,168 @@ async def check_installed_mcp_servers(
|
|||
claude_config_path = Path(os.environ["APPDATA"]) / "Claude" / "claude_desktop_config.json"
|
||||
|
||||
if claude_config_path and claude_config_path.exists():
|
||||
logger.debug("Checking Claude config at: %s", claude_config_path)
|
||||
await logger.adebug("Checking Claude config at: %s", claude_config_path)
|
||||
try:
|
||||
with claude_config_path.open("r") as f:
|
||||
claude_config = json.load(f)
|
||||
if "mcpServers" in claude_config and project_server_name in claude_config["mcpServers"]:
|
||||
logger.debug("Found Claude config for project server: %s", project_server_name)
|
||||
if config_contains_sse_url(claude_config, project_sse_url):
|
||||
await logger.adebug("Found Claude config with matching SSE URL: %s", project_sse_url)
|
||||
results.append("claude")
|
||||
else:
|
||||
logger.debug(
|
||||
"Claude config exists but no entry for server: %s (available servers: %s)",
|
||||
project_server_name,
|
||||
await logger.adebug(
|
||||
"Claude config exists but no server with SSE URL: %s (available servers: %s)",
|
||||
project_sse_url,
|
||||
list(claude_config.get("mcpServers", {}).keys()),
|
||||
)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("Failed to parse Claude config JSON at: %s", claude_config_path)
|
||||
await logger.awarning("Failed to parse Claude config JSON at: %s", claude_config_path)
|
||||
else:
|
||||
logger.debug("Claude config path not found or doesn't exist: %s", claude_config_path)
|
||||
await logger.adebug("Claude config path not found or doesn't exist: %s", claude_config_path)
|
||||
|
||||
except Exception as e:
|
||||
msg = f"Error checking MCP configuration: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
return results
|
||||
|
||||
|
||||
def config_contains_sse_url(config_data: dict, sse_url: str) -> bool:
|
||||
"""Check if any MCP server in the config uses the specified SSE URL."""
|
||||
mcp_servers = config_data.get("mcpServers", {})
|
||||
for server_name, server_config in mcp_servers.items():
|
||||
args = server_config.get("args", [])
|
||||
# The SSE URL is typically the last argument in mcp-proxy configurations
|
||||
if args and args[-1] == sse_url:
|
||||
logger.debug("Found matching SSE URL in server: %s", server_name)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
async def get_project_sse_url(project_id: UUID) -> str:
|
||||
"""Generate the SSE URL for a project, including WSL handling."""
|
||||
# Get settings service to build the SSE URL
|
||||
settings_service = get_settings_service()
|
||||
host = getattr(settings_service.settings, "host", "localhost")
|
||||
port = getattr(settings_service.settings, "port", 3000)
|
||||
base_url = f"http://{host}:{port}".rstrip("/")
|
||||
project_sse_url = f"{base_url}/api/v1/mcp/project/{project_id}/sse"
|
||||
|
||||
# Handle WSL case - must match the logic in install function
|
||||
os_type = platform.system()
|
||||
is_wsl = os_type == "Linux" and "microsoft" in platform.uname().release.lower()
|
||||
|
||||
if is_wsl and host in {"localhost", "127.0.0.1"}:
|
||||
try:
|
||||
proc = await create_subprocess_exec(
|
||||
"/usr/bin/hostname",
|
||||
"-I",
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.PIPE,
|
||||
)
|
||||
stdout, stderr = await proc.communicate()
|
||||
|
||||
if proc.returncode == 0 and stdout.strip():
|
||||
wsl_ip = stdout.decode().strip().split()[0] # Get first IP address
|
||||
logger.debug("Using WSL IP for external access: %s", wsl_ip)
|
||||
# Replace the localhost with the WSL IP in the URL
|
||||
project_sse_url = project_sse_url.replace(f"http://{host}:{port}", f"http://{wsl_ip}:{port}")
|
||||
except OSError as e:
|
||||
logger.warning("Failed to get WSL IP address: %s. Using default URL.", str(e))
|
||||
|
||||
return project_sse_url
|
||||
|
||||
|
||||
async def get_config_path(client: str) -> Path:
    """Get the configuration file path for a given client and operating system.

    Args:
        client: MCP client name; one of "cursor", "windsurf", or "claude"
            (case-insensitive).

    Returns:
        Path to the client's MCP configuration file.

    Raises:
        ValueError: If the client is unsupported, the OS is unsupported for
            Claude, or the Windows config path cannot be resolved from WSL.
    """
    os_type = platform.system()
    # WSL kernels report "microsoft" in their release string.
    is_wsl = os_type == "Linux" and "microsoft" in platform.uname().release.lower()

    if client.lower() == "cursor":
        return Path.home() / ".cursor" / "mcp.json"
    if client.lower() == "windsurf":
        return Path.home() / ".codeium" / "windsurf" / "mcp_config.json"
    if client.lower() == "claude":
        if os_type == "Darwin":  # macOS
            return Path.home() / "Library" / "Application Support" / "Claude" / "claude_desktop_config.json"
        if os_type == "Windows" or is_wsl:  # Windows or WSL (Claude runs on Windows host)
            if is_wsl:
                # In WSL, we need to access the Windows APPDATA directory
                try:
                    # First try to get the Windows username by asking the
                    # Windows-side cmd.exe (mounted under /mnt/c) to expand %USERNAME%.
                    proc = await create_subprocess_exec(
                        "/mnt/c/Windows/System32/cmd.exe",
                        "/c",
                        "echo %USERNAME%",
                        stdout=asyncio.subprocess.PIPE,
                        stderr=asyncio.subprocess.PIPE,
                    )
                    stdout, stderr = await proc.communicate()

                    if proc.returncode == 0 and stdout.strip():
                        windows_username = stdout.decode().strip()
                        return Path(
                            f"/mnt/c/Users/{windows_username}/AppData/Roaming/Claude/claude_desktop_config.json"
                        )

                    # Fallback: try to find the Windows user directory
                    users_dir = Path("/mnt/c/Users")
                    if users_dir.exists():
                        # Get the first non-system user directory
                        user_dirs = [
                            d
                            for d in users_dir.iterdir()
                            if d.is_dir() and not d.name.startswith(("Default", "Public", "All Users"))
                        ]
                        if user_dirs:
                            return user_dirs[0] / "AppData" / "Roaming" / "Claude" / "claude_desktop_config.json"

                    # Distinguish "drive not mounted" from "no user dir found".
                    # NOTE: these ValueErrors are raised inside the try block but are
                    # NOT caught by the except below (it only handles OSError and
                    # CalledProcessError), so they propagate to the caller unchanged.
                    if not Path("/mnt/c").exists():
                        msg = "Windows C: drive not mounted at /mnt/c in WSL"
                        raise ValueError(msg)

                    msg = "Could not find valid Windows user directory in WSL"
                    raise ValueError(msg)
                except (OSError, CalledProcessError) as e:
                    # Wrap low-level failures in a ValueError with context, keeping
                    # the original exception chained for debugging.
                    logger.warning("Failed to determine Windows user path in WSL: %s", str(e))
                    msg = f"Could not determine Windows Claude config path in WSL: {e!s}"
                    raise ValueError(msg) from e
            # Regular Windows
            return Path(os.environ["APPDATA"]) / "Claude" / "claude_desktop_config.json"

        msg = "Unsupported operating system for Claude configuration"
        raise ValueError(msg)

    msg = "Unsupported client"
    raise ValueError(msg)
|
||||
|
||||
|
||||
def remove_server_by_sse_url(config_data: dict, sse_url: str) -> tuple[dict, list[str]]:
    """Remove any MCP servers that use the specified SSE URL from config data.

    Args:
        config_data: Parsed MCP client configuration (mutated in place).
        sse_url: SSE endpoint URL to match against each server's last argument.

    Returns:
        tuple: (updated_config, list_of_removed_server_names)
    """
    if "mcpServers" not in config_data:
        return config_data, []

    servers = config_data["mcpServers"]

    # mcp-proxy style entries keep the SSE URL as the final argument, so the
    # match is on the last element of each server's "args" list.
    matching_names: list[str] = []
    for name, server in servers.items():
        arguments = server.get("args", [])
        if arguments and arguments[-1] == sse_url:
            matching_names.append(name)

    # Delete after iteration so the dict is not mutated while being traversed.
    for name in matching_names:
        del servers[name]
        logger.debug("Removed existing server with matching SSE URL: %s", name)

    return config_data, matching_names
|
||||
|
||||
|
||||
# Project-specific MCP server instance for handling project-specific tools
|
||||
class ProjectMCPServer:
|
||||
def __init__(self, project_id: UUID):
|
||||
|
|
@ -750,11 +1035,11 @@ async def init_mcp_servers():
|
|||
try:
|
||||
get_project_sse(project.id)
|
||||
get_project_mcp_server(project.id)
|
||||
except Exception as e:
|
||||
except Exception as e: # noqa: BLE001
|
||||
msg = f"Failed to initialize MCP server for project {project.id}: {e}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
# Continue to next project even if this one fails
|
||||
|
||||
except Exception as e:
|
||||
except Exception as e: # noqa: BLE001
|
||||
msg = f"Failed to initialize MCP servers: {e}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ from typing import Any, ParamSpec, TypeVar
|
|||
from urllib.parse import quote, unquote, urlparse
|
||||
from uuid import uuid4
|
||||
|
||||
from loguru import logger
|
||||
from mcp import types
|
||||
from sqlmodel import select
|
||||
|
||||
|
|
@ -21,6 +20,7 @@ from langflow.api.v1.schemas import SimplifiedAPIRequest
|
|||
from langflow.base.mcp.constants import MAX_MCP_TOOL_NAME_LENGTH
|
||||
from langflow.base.mcp.util import get_flow_snake_case, get_unique_name, sanitize_mcp_name
|
||||
from langflow.helpers.flow import json_schema_from_flow
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.message import Message
|
||||
from langflow.services.database.models import Flow
|
||||
from langflow.services.database.models.user.model import User
|
||||
|
|
@ -43,7 +43,7 @@ def handle_mcp_errors(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[
|
|||
return await func(*args, **kwargs)
|
||||
except Exception as e:
|
||||
msg = f"Error in {func.__name__}: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise
|
||||
|
||||
return wrapper
|
||||
|
|
@ -108,11 +108,11 @@ async def handle_list_resources(project_id=None):
|
|||
resources.append(resource)
|
||||
except FileNotFoundError as e:
|
||||
msg = f"Error listing files for flow {flow.id}: {e}"
|
||||
logger.debug(msg)
|
||||
await logger.adebug(msg)
|
||||
continue
|
||||
except Exception as e:
|
||||
msg = f"Error in listing resources: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise
|
||||
return resources
|
||||
|
||||
|
|
@ -150,7 +150,7 @@ async def handle_read_resource(uri: str) -> bytes:
|
|||
return base64.b64encode(content)
|
||||
except Exception as e:
|
||||
msg = f"Error reading resource {uri}: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise
|
||||
|
||||
|
||||
|
|
@ -271,7 +271,7 @@ async def handle_call_tool(
|
|||
return await with_db_session(execute_tool)
|
||||
except Exception as e:
|
||||
msg = f"Error executing tool {name}: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise
|
||||
|
||||
|
||||
|
|
@ -339,10 +339,10 @@ async def handle_list_tools(project_id=None, *, mcp_enabled_only=False):
|
|||
existing_names.add(name)
|
||||
except Exception as e: # noqa: BLE001
|
||||
msg = f"Error in listing tools: {e!s} from flow: {base_name}"
|
||||
logger.warning(msg)
|
||||
await logger.awarning(msg)
|
||||
continue
|
||||
except Exception as e:
|
||||
msg = f"Error in listing tools: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise
|
||||
return tools
|
||||
|
|
|
|||
545
src/backend/base/langflow/api/v1/openai_responses.py
Normal file
545
src/backend/base/langflow/api/v1/openai_responses.py
Normal file
|
|
@ -0,0 +1,545 @@
|
|||
import asyncio
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
from collections.abc import AsyncGenerator
|
||||
from typing import Annotated, Any
|
||||
|
||||
from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Request
|
||||
from fastapi.responses import StreamingResponse
|
||||
from loguru import logger
|
||||
|
||||
from langflow.api.v1.endpoints import consume_and_yield, run_flow_generator, simple_run_flow
|
||||
from langflow.api.v1.schemas import SimplifiedAPIRequest
|
||||
from langflow.events.event_manager import create_stream_tokens_event_manager
|
||||
from langflow.helpers.flow import get_flow_by_id_or_endpoint_name
|
||||
from langflow.schema.content_types import ToolContent
|
||||
from langflow.schema.openai_responses_schemas import (
|
||||
OpenAIErrorResponse,
|
||||
OpenAIResponsesRequest,
|
||||
OpenAIResponsesResponse,
|
||||
OpenAIResponsesStreamChunk,
|
||||
create_openai_error,
|
||||
)
|
||||
from langflow.services.auth.utils import api_key_security
|
||||
from langflow.services.database.models.flow.model import FlowRead
|
||||
from langflow.services.database.models.user.model import UserRead
|
||||
from langflow.services.deps import get_telemetry_service
|
||||
from langflow.services.telemetry.schema import RunPayload
|
||||
from langflow.services.telemetry.service import TelemetryService
|
||||
|
||||
router = APIRouter(tags=["OpenAI Responses API"])
|
||||
|
||||
|
||||
def has_chat_input(flow_data: dict | None) -> bool:
|
||||
"""Check if the flow has a chat input component."""
|
||||
if not flow_data or "nodes" not in flow_data:
|
||||
return False
|
||||
|
||||
return any(node.get("data", {}).get("type") in ["ChatInput", "Chat Input"] for node in flow_data["nodes"])
|
||||
|
||||
|
||||
def has_chat_output(flow_data: dict | None) -> bool:
|
||||
"""Check if the flow has a chat input component."""
|
||||
if not flow_data or "nodes" not in flow_data:
|
||||
return False
|
||||
|
||||
return any(node.get("data", {}).get("type") in ["ChatOutput", "Chat Output"] for node in flow_data["nodes"])
|
||||
|
||||
|
||||
async def run_flow_for_openai_responses(
    flow: FlowRead,
    request: OpenAIResponsesRequest,
    api_key_user: UserRead,
    *,
    stream: bool = False,
    variables: dict[str, str] | None = None,
) -> OpenAIResponsesResponse | StreamingResponse:
    """Run a flow for OpenAI Responses API compatibility.

    Args:
        flow: The flow to execute; must contain ChatInput and ChatOutput nodes.
        request: OpenAI-style request (model, input, stream, include, previous_response_id).
        api_key_user: The authenticated user executing the flow.
        stream: When True, return an SSE StreamingResponse; otherwise a full response object.
        variables: Optional global-variable overrides passed through the run context.

    Returns:
        OpenAIResponsesResponse for non-streaming runs, StreamingResponse for streaming runs.

    Raises:
        ValueError: If the flow lacks a ChatInput or ChatOutput component.
    """
    # Check if flow has chat input
    if not has_chat_input(flow.data):
        msg = "Flow must have a ChatInput component to be compatible with OpenAI Responses API"
        raise ValueError(msg)

    if not has_chat_output(flow.data):
        msg = "Flow must have a ChatOutput component to be compatible with OpenAI Responses API"
        raise ValueError(msg)

    # Use previous_response_id as session_id for conversation continuity
    # If no previous_response_id, create a new session_id
    session_id = request.previous_response_id or str(uuid.uuid4())

    # Store header variables in context for global variable override
    context = {}
    if variables:
        context["request_variables"] = variables
        logger.debug(f"Added request variables to context: {variables}")

    # Convert OpenAI request to SimplifiedAPIRequest
    # Note: We're moving away from tweaks to a context-based approach
    simplified_request = SimplifiedAPIRequest(
        input_value=request.input,
        input_type="chat",  # Use chat input type for better compatibility
        output_type="chat",  # Use chat output type for better compatibility
        tweaks={},  # Empty tweaks, using context instead
        session_id=session_id,
    )

    # Context will be passed separately to simple_run_flow

    logger.debug(f"SimplifiedAPIRequest created with context: {context}")

    # Use session_id as response_id for OpenAI compatibility
    response_id = session_id
    created_timestamp = int(time.time())

    if stream:
        # Handle streaming response
        asyncio_queue: asyncio.Queue = asyncio.Queue()
        asyncio_queue_client_consumed: asyncio.Queue = asyncio.Queue()
        event_manager = create_stream_tokens_event_manager(queue=asyncio_queue)

        async def openai_stream_generator() -> AsyncGenerator[str, None]:
            """Convert Langflow events to OpenAI Responses API streaming format."""
            main_task = asyncio.create_task(
                run_flow_generator(
                    flow=flow,
                    input_request=simplified_request,
                    api_key_user=api_key_user,
                    event_manager=event_manager,
                    client_consumed_queue=asyncio_queue_client_consumed,
                    context=context,
                )
            )

            try:
                # Send initial chunk to establish connection
                initial_chunk = OpenAIResponsesStreamChunk(
                    id=response_id,
                    created=created_timestamp,
                    model=request.model,
                    delta={"content": ""},
                )
                yield f"data: {initial_chunk.model_dump_json()}\n\n"

                tool_call_counter = 0
                processed_tools = set()  # Track processed tool calls to avoid duplicates
                previous_content = ""  # Track content already sent to calculate deltas

                async for event_data in consume_and_yield(asyncio_queue, asyncio_queue_client_consumed):
                    if event_data is None:
                        break

                    content = ""

                    # Parse byte string events as JSON.
                    # Fix: previously re-ran `import json` on every event here;
                    # the module-level json import is used instead.
                    if isinstance(event_data, bytes):
                        try:
                            event_str = event_data.decode("utf-8")
                            parsed_event = json.loads(event_str)

                            if isinstance(parsed_event, dict):
                                event_type = parsed_event.get("event")
                                data = parsed_event.get("data", {})

                                # Handle add_message events
                                if event_type == "add_message":
                                    sender_name = data.get("sender_name", "")
                                    text = data.get("text", "")
                                    sender = data.get("sender", "")
                                    content_blocks = data.get("content_blocks", [])

                                    # Look for Agent Steps in content_blocks
                                    for block in content_blocks:
                                        if block.get("title") == "Agent Steps":
                                            contents = block.get("contents", [])
                                            for step in contents:
                                                # Look for tool_use type items
                                                if step.get("type") == "tool_use":
                                                    tool_name = step.get("name", "")
                                                    tool_input = step.get("tool_input", {})
                                                    tool_output = step.get("output")

                                                    # Only emit tool calls with explicit tool names and
                                                    # meaningful arguments
                                                    if tool_name and tool_input is not None and tool_output is not None:
                                                        # Create unique identifier for this tool call
                                                        tool_signature = (
                                                            f"{tool_name}:{hash(str(sorted(tool_input.items())))}"
                                                        )

                                                        # Skip if we've already processed this tool call
                                                        if tool_signature in processed_tools:
                                                            continue

                                                        processed_tools.add(tool_signature)
                                                        tool_call_counter += 1
                                                        call_id = f"call_{tool_call_counter}"
                                                        tool_id = f"fc_{tool_call_counter}"
                                                        tool_call_event = {
                                                            "type": "response.output_item.added",
                                                            "item": {
                                                                "id": tool_id,
                                                                "type": "function_call",  # OpenAI uses "function_call"
                                                                "status": "in_progress",  # OpenAI includes status
                                                                "name": tool_name,
                                                                "arguments": "",  # Start with empty, build via deltas
                                                                "call_id": call_id,
                                                            },
                                                        }
                                                        yield (
                                                            f"event: response.output_item.added\n"
                                                            f"data: {json.dumps(tool_call_event)}\n\n"
                                                        )

                                                        # Send function call arguments as delta events (like OpenAI)
                                                        arguments_str = json.dumps(tool_input)
                                                        arg_delta_event = {
                                                            "type": "response.function_call_arguments.delta",
                                                            "delta": arguments_str,
                                                            "item_id": tool_id,
                                                            "output_index": 0,
                                                        }
                                                        yield (
                                                            f"event: response.function_call_arguments.delta\n"
                                                            f"data: {json.dumps(arg_delta_event)}\n\n"
                                                        )

                                                        # Send function call arguments done event
                                                        arg_done_event = {
                                                            "type": "response.function_call_arguments.done",
                                                            "arguments": arguments_str,
                                                            "item_id": tool_id,
                                                            "output_index": 0,
                                                        }
                                                        yield (
                                                            f"event: response.function_call_arguments.done\n"
                                                            f"data: {json.dumps(arg_done_event)}\n\n"
                                                        )

                                                        # If there's output, send completion event
                                                        if tool_output is not None:
                                                            # Check if include parameter requests tool_call.results
                                                            include_results = (
                                                                request.include
                                                                and "tool_call.results" in request.include
                                                            )

                                                            if include_results:
                                                                # Format with detailed results
                                                                tool_done_event = {
                                                                    "type": "response.output_item.done",
                                                                    "item": {
                                                                        "id": f"{tool_name}_{tool_id}",
                                                                        "inputs": tool_input,  # Raw inputs as-is
                                                                        "status": "completed",
                                                                        "type": "tool_call",
                                                                        "tool_name": f"{tool_name}",
                                                                        "results": tool_output,  # Raw output as-is
                                                                    },
                                                                    "output_index": 0,
                                                                    "sequence_number": tool_call_counter + 5,
                                                                }
                                                            else:
                                                                # Regular function call format
                                                                tool_done_event = {
                                                                    "type": "response.output_item.done",
                                                                    "item": {
                                                                        "id": tool_id,
                                                                        "type": "function_call",  # Match OpenAI format
                                                                        "status": "completed",
                                                                        "arguments": arguments_str,
                                                                        "call_id": call_id,
                                                                        "name": tool_name,
                                                                    },
                                                                }

                                                            yield (
                                                                f"event: response.output_item.done\n"
                                                                f"data: {json.dumps(tool_done_event)}\n\n"
                                                            )

                                    # Extract text content for streaming (only AI responses)
                                    if (
                                        sender in ["Machine", "AI", "Agent"]
                                        and text != request.input
                                        and sender_name == "Agent"
                                    ):
                                        # Calculate delta: only send newly generated content
                                        if text.startswith(previous_content):
                                            content = text[len(previous_content) :]
                                            previous_content = text
                                        else:
                                            # If text doesn't start with previous content, send full text
                                            # This handles cases where the content might be reset
                                            content = text
                                            previous_content = text

                        except (json.JSONDecodeError, UnicodeDecodeError):
                            continue

                    # Only send chunks with actual content
                    if content:
                        chunk = OpenAIResponsesStreamChunk(
                            id=response_id,
                            created=created_timestamp,
                            model=request.model,
                            delta={"content": content},
                        )
                        yield f"data: {chunk.model_dump_json()}\n\n"

                # Send final completion chunk
                final_chunk = OpenAIResponsesStreamChunk(
                    id=response_id,
                    created=created_timestamp,
                    model=request.model,
                    delta={},
                    status="completed",
                )
                yield f"data: {final_chunk.model_dump_json()}\n\n"
                yield "data: [DONE]\n\n"

            except Exception as e:  # noqa: BLE001
                logger.error(f"Error in stream generator: {e}")
                error_response = create_openai_error(
                    message=str(e),
                    type_="processing_error",
                )
                yield f"data: {error_response}\n\n"
            finally:
                if not main_task.done():
                    main_task.cancel()

        return StreamingResponse(
            openai_stream_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Access-Control-Allow-Origin": "*",
            },
        )

    # Handle non-streaming response
    result = await simple_run_flow(
        flow=flow,
        input_request=simplified_request,
        stream=False,
        api_key_user=api_key_user,
        context=context,
    )

    # Extract output text and tool calls from result
    output_text = ""
    tool_calls: list[dict[str, Any]] = []

    if result.outputs:
        for run_output in result.outputs:
            if run_output and run_output.outputs:
                for component_output in run_output.outputs:
                    if component_output:
                        # Handle messages (final chat outputs)
                        if hasattr(component_output, "messages") and component_output.messages:
                            for msg in component_output.messages:
                                if hasattr(msg, "message"):
                                    output_text = msg.message
                                    break
                        # Handle results
                        if not output_text and hasattr(component_output, "results") and component_output.results:
                            for value in component_output.results.values():
                                if hasattr(value, "get_text"):
                                    output_text = value.get_text()
                                    break
                                if isinstance(value, str):
                                    output_text = value
                                    break

                        if hasattr(component_output, "results") and component_output.results:
                            for blocks in component_output.results.get("message", {}).content_blocks:
                                tool_calls.extend(
                                    {
                                        "name": content.name,
                                        "input": content.tool_input,
                                        "output": content.output,
                                    }
                                    for content in blocks.contents
                                    if isinstance(content, ToolContent)
                                )
                        if output_text:
                            break
            if output_text:
                break

    # Build output array
    output_items = []

    # Add tool calls if includes parameter requests them
    include_results = request.include and "tool_call.results" in request.include

    tool_call_id_counter = 1
    for tool_call in tool_calls:
        if include_results:
            # Format as detailed tool call with results (like file_search_call in sample)
            tool_call_item = {
                "id": f"{tool_call['name']}_{tool_call_id_counter}",
                "queries": list(tool_call["input"].values())
                if isinstance(tool_call["input"], dict)
                else [str(tool_call["input"])],
                "status": "completed",
                "tool_name": f"{tool_call['name']}",
                "type": "tool_call",
                "results": tool_call["output"] if tool_call["output"] is not None else [],
            }
        else:
            # Format as basic function call
            tool_call_item = {
                "id": f"fc_{tool_call_id_counter}",
                "type": "function_call",
                "status": "completed",
                "name": tool_call["name"],
                "arguments": json.dumps(tool_call["input"]) if tool_call["input"] is not None else "{}",
            }

        output_items.append(tool_call_item)
        tool_call_id_counter += 1

    # Add the message output
    output_message = {
        "type": "message",
        "id": f"msg_{response_id}",
        "status": "completed",
        "role": "assistant",
        "content": [{"type": "output_text", "text": output_text, "annotations": []}],
    }
    output_items.append(output_message)

    return OpenAIResponsesResponse(
        id=response_id,
        created_at=created_timestamp,
        model=request.model,
        output=output_items,
        previous_response_id=request.previous_response_id,
    )
|
||||
|
||||
|
||||
@router.post("/responses", response_model=None)
async def create_response(
    request: OpenAIResponsesRequest,
    background_tasks: BackgroundTasks,
    api_key_user: Annotated[UserRead, Depends(api_key_security)],
    telemetry_service: Annotated[TelemetryService, Depends(get_telemetry_service)],
    http_request: Request,
) -> OpenAIResponsesResponse | StreamingResponse | OpenAIErrorResponse:
    """Create a response using OpenAI Responses API format.

    This endpoint accepts a flow_id in the model parameter and processes
    the input through the specified Langflow flow.

    Args:
        request: OpenAI Responses API request with model (flow_id) and input
        background_tasks: FastAPI background task manager
        api_key_user: Authenticated user from API key
        http_request: The incoming HTTP request
        telemetry_service: Telemetry service for logging

    Returns:
        OpenAI-compatible response or streaming response

    Raises:
        HTTPException: For validation errors or flow execution issues
    """
    start_time = time.perf_counter()

    # Extract global variables from X-LANGFLOW-GLOBAL-VAR-* headers
    variables = {}
    header_prefix = "x-langflow-global-var-"

    logger.debug(f"All headers received: {list(http_request.headers.keys())}")
    logger.debug(f"Looking for headers starting with: {header_prefix}")

    for header_name, header_value in http_request.headers.items():
        header_lower = header_name.lower()
        logger.debug(f"Checking header: '{header_lower}' (original: '{header_name}')")
        if header_lower.startswith(header_prefix):
            # Extract variable name from header (remove prefix) and convert to uppercase
            var_name_lower = header_lower[len(header_prefix) :]
            var_name = var_name_lower.upper()  # Default to uppercase

            variables[var_name] = header_value
            # NOTE(review): this debug line logs the raw header value; if callers
            # pass secrets via these headers they end up in debug logs — confirm
            # this is acceptable or redact the value.
            logger.debug(
                f"Found global variable: {var_name} = {header_value} "
                f"(converted to uppercase from header: {header_name})"
            )

    logger.debug(f"Extracted global variables from headers: {list(variables.keys())}")
    logger.debug(f"Variables dict: {variables}")

    # Validate tools parameter - error out if tools are provided
    if request.tools is not None:
        error_response = create_openai_error(
            message="Tools are not supported yet",
            type_="invalid_request_error",
            code="tools_not_supported",
        )
        return OpenAIErrorResponse(error=error_response["error"])

    # Get flow using the model field (which contains flow_id)
    # A lookup failure is normalized to None so both "not found" paths below
    # produce the same OpenAI-style error.
    try:
        flow = await get_flow_by_id_or_endpoint_name(request.model, str(api_key_user.id))
    except HTTPException:
        flow = None

    if flow is None:
        error_response = create_openai_error(
            message=f"Flow with id '{request.model}' not found",
            type_="invalid_request_error",
            code="flow_not_found",
        )
        return OpenAIErrorResponse(error=error_response["error"])

    try:
        # Process the request
        result = await run_flow_for_openai_responses(
            flow=flow,
            request=request,
            api_key_user=api_key_user,
            stream=request.stream,
            variables=variables,
        )

        # Log telemetry for successful completion
        if not request.stream:  # Only log for non-streaming responses
            end_time = time.perf_counter()
            background_tasks.add_task(
                telemetry_service.log_package_run,
                RunPayload(
                    run_is_webhook=False,
                    run_seconds=int(end_time - start_time),
                    run_success=True,
                    run_error_message="",
                ),
            )

    except Exception as exc:  # noqa: BLE001
        logger.error(f"Error processing OpenAI Responses request: {exc}")

        # Log telemetry for failed completion
        background_tasks.add_task(
            telemetry_service.log_package_run,
            RunPayload(
                run_is_webhook=False,
                run_seconds=int(time.perf_counter() - start_time),
                run_success=False,
                run_error_message=str(exc),
            ),
        )

        # Return OpenAI-compatible error
        error_response = create_openai_error(
            message=str(exc),
            type_="processing_error",
        )
        return OpenAIErrorResponse(error=error_response["error"])
    return result
|
||||
|
|
@ -407,13 +407,15 @@ class ConfigResponse(BaseModel):
|
|||
public_flow_cleanup_interval: int
|
||||
public_flow_expiration: int
|
||||
event_delivery: Literal["polling", "streaming", "direct"]
|
||||
webhook_auth_enable: bool
|
||||
|
||||
@classmethod
|
||||
def from_settings(cls, settings: Settings) -> "ConfigResponse":
|
||||
"""Create a ConfigResponse instance using values from a Settings object and global feature flags.
|
||||
def from_settings(cls, settings: Settings, auth_settings) -> "ConfigResponse":
|
||||
"""Create a ConfigResponse instance using values from a Settings object and AuthSettings.
|
||||
|
||||
Parameters:
|
||||
settings (Settings): The Settings object containing configuration values.
|
||||
auth_settings: The AuthSettings object containing authentication configuration values.
|
||||
|
||||
Returns:
|
||||
ConfigResponse: An instance populated with configuration and feature flag values.
|
||||
|
|
@ -431,6 +433,7 @@ class ConfigResponse(BaseModel):
|
|||
public_flow_cleanup_interval=settings.public_flow_cleanup_interval,
|
||||
public_flow_expiration=settings.public_flow_expiration,
|
||||
event_delivery=settings.event_delivery,
|
||||
webhook_auth_enable=auth_settings.WEBHOOK_AUTH_ENABLE,
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -444,18 +447,13 @@ class CancelFlowResponse(BaseModel):
|
|||
class AuthSettings(BaseModel):
|
||||
"""Model representing authentication settings for MCP."""
|
||||
|
||||
auth_type: Literal["none", "apikey", "basic", "bearer", "iam", "oauth"] = "none"
|
||||
api_key: SecretStr | None = None
|
||||
username: str | None = None
|
||||
password: SecretStr | None = None
|
||||
bearer_token: SecretStr | None = None
|
||||
iam_endpoint: str | None = None
|
||||
auth_type: Literal["none", "apikey", "oauth"] = "none"
|
||||
oauth_host: str | None = None
|
||||
oauth_port: str | None = None
|
||||
oauth_server_url: str | None = None
|
||||
oauth_callback_path: str | None = None
|
||||
oauth_client_id: str | None = None
|
||||
oauth_client_secret: str | None = None
|
||||
oauth_client_secret: SecretStr | None = None
|
||||
oauth_auth_url: str | None = None
|
||||
oauth_token_url: str | None = None
|
||||
oauth_mcp_scope: str | None = None
|
||||
|
|
|
|||
|
|
@ -2,9 +2,9 @@ from typing import Annotated
|
|||
from uuid import UUID
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||
from loguru import logger
|
||||
|
||||
from langflow.api.utils import CurrentActiveUser, check_langflow_version
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.services.auth import utils as auth_utils
|
||||
from langflow.services.deps import get_settings_service, get_store_service
|
||||
from langflow.services.store.exceptions import CustomError
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
from fastapi import APIRouter, HTTPException
|
||||
from loguru import logger
|
||||
|
||||
from langflow.api.utils import CurrentActiveUser
|
||||
from langflow.api.v1.base import Code, CodeValidationResponse, PromptValidationResponse, ValidatePromptRequest
|
||||
from langflow.base.prompts.api_utils import process_prompt_template
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.utils.validate import validate_code
|
||||
|
||||
# build router
|
||||
|
|
@ -19,7 +19,7 @@ async def post_validate_code(code: Code, _current_user: CurrentActiveUser) -> Co
|
|||
function=errors.get("function", {}),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.opt(exception=True).debug("Error validating code")
|
||||
logger.debug("Error validating code", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -33,11 +33,7 @@ from langflow.services.database.models.flow.model import Flow
|
|||
from langflow.services.database.models.message.model import MessageTable
|
||||
from langflow.services.database.models.user.model import User
|
||||
from langflow.services.deps import get_variable_service, session_scope
|
||||
from langflow.utils.voice_utils import (
|
||||
BYTES_PER_24K_FRAME,
|
||||
VAD_SAMPLE_RATE_16K,
|
||||
resample_24k_to_16k,
|
||||
)
|
||||
from langflow.utils.voice_utils import BYTES_PER_24K_FRAME, VAD_SAMPLE_RATE_16K, resample_24k_to_16k
|
||||
|
||||
router = APIRouter(prefix="/voice", tags=["Voice"])
|
||||
|
||||
|
|
@ -121,8 +117,8 @@ async def authenticate_and_get_openai_key(session: DbSession, user: User, websoc
|
|||
)
|
||||
return None, None
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.error(f"Error with API key: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error with API key: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
return None, None
|
||||
return user, openai_key
|
||||
|
||||
|
|
@ -185,13 +181,13 @@ class ElevenLabsClientManager:
|
|||
session=session,
|
||||
)
|
||||
except (InvalidToken, ValueError) as e:
|
||||
logger.error(f"Error with ElevenLabs API key: {e}")
|
||||
await logger.aerror(f"Error with ElevenLabs API key: {e}")
|
||||
cls._api_key = os.getenv("ELEVENLABS_API_KEY", "")
|
||||
if not cls._api_key:
|
||||
logger.error("ElevenLabs API key not found")
|
||||
await logger.aerror("ElevenLabs API key not found")
|
||||
return None
|
||||
except (KeyError, AttributeError, sqlalchemy.exc.SQLAlchemyError) as e:
|
||||
logger.error(f"Exception getting ElevenLabs API key: {e}")
|
||||
await logger.aerror(f"Exception getting ElevenLabs API key: {e}")
|
||||
return None
|
||||
|
||||
if cls._api_key:
|
||||
|
|
@ -310,25 +306,25 @@ async def process_message_queue(queue_key, session):
|
|||
|
||||
try:
|
||||
await aadd_messagetables([message], session)
|
||||
logger.debug(f"Added message to DB: {message.text[:30]}...")
|
||||
await logger.adebug(f"Added message to DB: {message.text[:30]}...")
|
||||
except ValueError as e:
|
||||
logger.error(f"Error saving message to database (ValueError): {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database (ValueError): {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
except sqlalchemy.exc.SQLAlchemyError as e:
|
||||
logger.error(f"Error saving message to database (SQLAlchemyError): {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database (SQLAlchemyError): {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
except (KeyError, AttributeError, TypeError) as e:
|
||||
# More specific exceptions instead of blind Exception
|
||||
logger.error(f"Error saving message to database: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
finally:
|
||||
message_queues[queue_key].task_done()
|
||||
|
||||
if message_queues[queue_key].empty():
|
||||
break
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.debug(f"Message queue processor for {queue_key} was cancelled: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.adebug(f"Message queue processor for {queue_key} was cancelled: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
|
||||
|
||||
class SendQueues:
|
||||
|
|
@ -369,7 +365,7 @@ class SendQueues:
|
|||
logger.trace("OPENAI BLOCKING")
|
||||
# log_event(msg, DIRECTION_TO_OPENAI)
|
||||
except Exception: # noqa: BLE001
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(traceback.format_exc())
|
||||
|
||||
def client_send(self, payload):
|
||||
try:
|
||||
|
|
@ -387,7 +383,7 @@ class SendQueues:
|
|||
self.log_event(msg, LF_TO_CLIENT)
|
||||
await self.client_ws.send_text(json.dumps(msg))
|
||||
except Exception: # noqa: BLE001
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(traceback.format_exc())
|
||||
|
||||
async def close(self):
|
||||
self.openai_send_q.put_nowait(None)
|
||||
|
|
@ -462,7 +458,7 @@ async def handle_function_call(
|
|||
create_response()
|
||||
except json.JSONDecodeError as e:
|
||||
trace = traceback.format_exc()
|
||||
logger.error(f"JSON decode error: {e!s}\ntrace: {trace}")
|
||||
await logger.aerror(f"JSON decode error: {e!s}\ntrace: {trace}")
|
||||
function_output = {
|
||||
"type": "conversation.item.create",
|
||||
"item": {
|
||||
|
|
@ -474,7 +470,7 @@ async def handle_function_call(
|
|||
msg_handler.openai_send(function_output)
|
||||
except ValueError as e:
|
||||
trace = traceback.format_exc()
|
||||
logger.error(f"Value error: {e!s}\ntrace: {trace}")
|
||||
await logger.aerror(f"Value error: {e!s}\ntrace: {trace}")
|
||||
function_output = {
|
||||
"type": "conversation.item.create",
|
||||
"item": {
|
||||
|
|
@ -486,7 +482,7 @@ async def handle_function_call(
|
|||
msg_handler.openai_send(function_output)
|
||||
except (ConnectionError, websockets.exceptions.WebSocketException) as e:
|
||||
trace = traceback.format_exc()
|
||||
logger.error(f"Connection error: {e!s}\ntrace: {trace}")
|
||||
await logger.aerror(f"Connection error: {e!s}\ntrace: {trace}")
|
||||
function_output = {
|
||||
"type": "conversation.item.create",
|
||||
"item": {
|
||||
|
|
@ -497,8 +493,8 @@ async def handle_function_call(
|
|||
}
|
||||
msg_handler.openai_send(function_output)
|
||||
except (KeyError, AttributeError, TypeError) as e:
|
||||
logger.error(f"Error executing flow: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error executing flow: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
function_output = {
|
||||
"type": "conversation.item.create",
|
||||
"item": {
|
||||
|
|
@ -751,7 +747,7 @@ async def flow_as_tool_websocket(
|
|||
except Exception as e: # noqa: BLE001
|
||||
err_msg = {"error": f"Failed to load flow: {e!s}"}
|
||||
await client_websocket.send_json(err_msg)
|
||||
logger.error(f"Failed to load flow: {e}")
|
||||
await logger.aerror(f"Failed to load flow: {e}")
|
||||
return
|
||||
|
||||
url = "wss://api.openai.com/v1/realtime?model=gpt-4o-mini-realtime-preview"
|
||||
|
|
@ -800,7 +796,7 @@ async def flow_as_tool_websocket(
|
|||
msg_handler.openai_send({"type": "response.cancel"})
|
||||
bot_speaking_flag[0] = False
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.error(f"[ERROR] VAD processing failed (ValueError): {e}")
|
||||
await logger.aerror(f"[ERROR] VAD processing failed (ValueError): {e}")
|
||||
continue
|
||||
if has_speech:
|
||||
last_speech_time = datetime.now(tz=timezone.utc)
|
||||
|
|
@ -856,7 +852,7 @@ async def flow_as_tool_websocket(
|
|||
return new_session
|
||||
|
||||
class Response:
|
||||
def __init__(self, response_id: str, use_elevenlabs: bool | None = None):
|
||||
def __init__(self, response_id: str, *, use_elevenlabs: bool | None = None):
|
||||
if use_elevenlabs is None:
|
||||
use_elevenlabs = False
|
||||
self.response_id = response_id
|
||||
|
|
@ -925,7 +921,7 @@ async def flow_as_tool_websocket(
|
|||
# client_send_event_from_thread(event, main_loop)
|
||||
msg_handler.client_send(event)
|
||||
except Exception: # noqa: BLE001
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(traceback.format_exc())
|
||||
|
||||
async def forward_to_openai() -> None:
|
||||
nonlocal openai_realtime_session
|
||||
|
|
@ -954,10 +950,10 @@ async def flow_as_tool_websocket(
|
|||
msg_handler.openai_send(msg)
|
||||
num_audio_samples = 0
|
||||
elif msg.get("type") == "langflow.voice_mode.config":
|
||||
logger.info(f"langflow.voice_mode.config {msg}")
|
||||
await logger.ainfo(f"langflow.voice_mode.config {msg}")
|
||||
voice_config.progress_enabled = msg.get("progress_enabled", True)
|
||||
elif msg.get("type") == "langflow.elevenlabs.config":
|
||||
logger.info(f"langflow.elevenlabs.config {msg}")
|
||||
await logger.ainfo(f"langflow.elevenlabs.config {msg}")
|
||||
voice_config.use_elevenlabs = msg["enabled"]
|
||||
voice_config.elevenlabs_voice = msg.get("voice_id", voice_config.elevenlabs_voice)
|
||||
|
||||
|
|
@ -997,7 +993,7 @@ async def flow_as_tool_websocket(
|
|||
if do_forward:
|
||||
msg_handler.client_send(event)
|
||||
if event_type == "response.created":
|
||||
responses[response_id] = Response(response_id, voice_config.use_elevenlabs)
|
||||
responses[response_id] = Response(response_id, use_elevenlabs=voice_config.use_elevenlabs)
|
||||
if function_call:
|
||||
if function_call.is_prog_enabled and not function_call.prog_rsp_id:
|
||||
function_call.prog_rsp_id = response_id
|
||||
|
|
@ -1021,12 +1017,12 @@ async def flow_as_tool_websocket(
|
|||
message_text = event.get("text", "")
|
||||
await add_message_to_db(message_text, session, flow_id, session_id, "Machine", "AI")
|
||||
except ValueError as err:
|
||||
logger.error(f"Error saving message to database (ValueError): {err}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database (ValueError): {err}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
except (KeyError, AttributeError, TypeError) as err:
|
||||
# Replace blind Exception with specific exceptions
|
||||
logger.error(f"Error saving message to database: {err}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database: {err}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
|
||||
elif event_type == "response.output_item.added":
|
||||
bot_speaking_flag[0] = True
|
||||
|
|
@ -1050,12 +1046,12 @@ async def flow_as_tool_websocket(
|
|||
if transcript and transcript.strip():
|
||||
await add_message_to_db(transcript, session, flow_id, session_id, "Machine", "AI")
|
||||
except ValueError as err:
|
||||
logger.error(f"Error saving message to database (ValueError): {err}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database (ValueError): {err}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
except (KeyError, AttributeError, TypeError) as err:
|
||||
# Replace blind Exception with specific exceptions
|
||||
logger.error(f"Error saving message to database: {err}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database: {err}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
bot_speaking_flag[0] = False
|
||||
elif event_type == "response.done":
|
||||
msg_handler.openai_unblock()
|
||||
|
|
@ -1080,12 +1076,12 @@ async def flow_as_tool_websocket(
|
|||
if message_text and message_text.strip():
|
||||
await add_message_to_db(message_text, session, flow_id, session_id, "User", "User")
|
||||
except ValueError as e:
|
||||
logger.error(f"Error saving message to database (ValueError): {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database (ValueError): {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
except (KeyError, AttributeError, TypeError) as e:
|
||||
# Replace blind Exception with specific exceptions
|
||||
logger.error(f"Error saving message to database: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error saving message to database: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
elif event_type == "error":
|
||||
pass
|
||||
|
||||
|
|
@ -1104,12 +1100,12 @@ async def flow_as_tool_websocket(
|
|||
# Check for exceptions in results
|
||||
for result in results:
|
||||
if isinstance(result, Exception):
|
||||
logger.error("WS loop failed:", exc_info=result)
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror("WS loop failed:", exc_info=result)
|
||||
await logger.aerror(traceback.format_exc())
|
||||
except Exception as e: # noqa: BLE001
|
||||
# Handle any other exceptions
|
||||
logger.error(f"WS loop failed: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"WS loop failed: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
finally:
|
||||
# shared cleanup for writers & sockets
|
||||
async def close():
|
||||
|
|
@ -1119,8 +1115,8 @@ async def flow_as_tool_websocket(
|
|||
|
||||
await close()
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.error(f"Unexpected error: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Unexpected error: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
finally:
|
||||
# Make sure to clean up the task
|
||||
if vad_task and not vad_task.done():
|
||||
|
|
@ -1232,16 +1228,16 @@ async def flow_tts_websocket(
|
|||
elif event.get("type") == "input_audio_buffer.commit":
|
||||
openai_send(event)
|
||||
elif event.get("type") == "langflow.elevenlabs.config":
|
||||
logger.info(f"langflow.elevenlabs.config {event}")
|
||||
await logger.ainfo(f"langflow.elevenlabs.config {event}")
|
||||
tts_config.use_elevenlabs = event["enabled"]
|
||||
tts_config.elevenlabs_voice = event.get("voice_id", tts_config.elevenlabs_voice)
|
||||
elif event.get("type") == "voice.settings":
|
||||
# Store the voice setting
|
||||
if event.get("voice"):
|
||||
tts_config.openai_voice = event.get("voice")
|
||||
logger.info(f"Updated OpenAI voice to: {tts_config.openai_voice}")
|
||||
await logger.ainfo(f"Updated OpenAI voice to: {tts_config.openai_voice}")
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.error(f"Error in WebSocket communication: {e}")
|
||||
await logger.aerror(f"Error in WebSocket communication: {e}")
|
||||
|
||||
async def forward_to_client() -> None:
|
||||
try:
|
||||
|
|
@ -1312,7 +1308,7 @@ async def flow_tts_websocket(
|
|||
audio_event = {"type": "response.audio.delta", "delta": base64_audio}
|
||||
client_send(audio_event)
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.error(f"Error in WebSocket communication: {e}")
|
||||
await logger.aerror(f"Error in WebSocket communication: {e}")
|
||||
|
||||
try:
|
||||
# Create tasks and gather them for concurrent execution
|
||||
|
|
@ -1321,13 +1317,13 @@ async def flow_tts_websocket(
|
|||
await asyncio.gather(task1, task2)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
# handle any exceptions from any task
|
||||
logger.error("WS loop failed:", exc_info=exc)
|
||||
await logger.aerror("WS loop failed:", exc_info=exc)
|
||||
finally:
|
||||
# shared cleanup for writers & sockets
|
||||
await close()
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.error(f"Unexpected error: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Unexpected error: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
|
||||
|
||||
def extract_transcript(json_data):
|
||||
|
|
@ -1367,13 +1363,13 @@ async def get_elevenlabs_voice_ids(
|
|||
for voice in voices
|
||||
]
|
||||
except ValueError as e:
|
||||
logger.error(f"Error fetching ElevenLabs voices (ValueError): {e}")
|
||||
await logger.aerror(f"Error fetching ElevenLabs voices (ValueError): {e}")
|
||||
return {"error": str(e)}
|
||||
except requests.RequestException as e:
|
||||
logger.error(f"Error fetching ElevenLabs voices (RequestException): {e}")
|
||||
await logger.aerror(f"Error fetching ElevenLabs voices (RequestException): {e}")
|
||||
return {"error": str(e)}
|
||||
except (KeyError, AttributeError, TypeError) as e:
|
||||
# More specific exceptions instead of blind Exception
|
||||
logger.error(f"Error fetching ElevenLabs voices: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
await logger.aerror(f"Error fetching ElevenLabs voices: {e}")
|
||||
await logger.aerror(traceback.format_exc())
|
||||
return {"error": str(e)}
|
||||
|
|
|
|||
|
|
@ -11,11 +11,11 @@ from zoneinfo import ZoneInfo
|
|||
|
||||
from fastapi import APIRouter, Depends, File, HTTPException, UploadFile
|
||||
from fastapi.responses import StreamingResponse
|
||||
from loguru import logger
|
||||
from sqlmodel import col, select
|
||||
|
||||
from langflow.api.schemas import UploadFileResponse
|
||||
from langflow.api.utils import CurrentActiveUser, DbSession
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.services.database.models.file.model import File as UserFile
|
||||
from langflow.services.deps import get_settings_service, get_storage_service
|
||||
from langflow.services.storage.service import StorageService
|
||||
|
|
@ -123,7 +123,9 @@ async def upload_user_file(
|
|||
unique_filename = new_filename
|
||||
else:
|
||||
# For normal files, ensure unique name by appending a count if necessary
|
||||
stmt = select(UserFile).where(col(UserFile.name).like(f"{root_filename}%"))
|
||||
stmt = select(UserFile).where(
|
||||
col(UserFile.name).like(f"{root_filename}%"), UserFile.user_id == current_user.id
|
||||
)
|
||||
existing_files = await session.exec(stmt)
|
||||
files = existing_files.all() # Fetch all matching records
|
||||
|
||||
|
|
@ -486,7 +488,7 @@ async def delete_file(
|
|||
raise
|
||||
except Exception as e:
|
||||
# Log and return a generic server error
|
||||
logger.error("Error deleting file %s: %s", file_id, e)
|
||||
await logger.aerror("Error deleting file %s: %s", file_id, e)
|
||||
raise HTTPException(status_code=500, detail=f"Error deleting file: {e}") from e
|
||||
return {"detail": f"File {file_to_delete.name} deleted successfully"}
|
||||
|
||||
|
|
|
|||
|
|
@ -115,6 +115,7 @@ async def get_servers(
|
|||
session: DbSession,
|
||||
storage_service=Depends(get_storage_service),
|
||||
settings_service=Depends(get_settings_service),
|
||||
*,
|
||||
action_count: bool | None = None,
|
||||
):
|
||||
"""Get the list of available servers."""
|
||||
|
|
@ -140,27 +141,27 @@ async def get_servers(
|
|||
server_info["error"] = "No tools found"
|
||||
except ValueError as e:
|
||||
# Configuration validation errors, invalid URLs, etc.
|
||||
logger.error(f"Configuration error for server {server_name}: {e}")
|
||||
await logger.aerror(f"Configuration error for server {server_name}: {e}")
|
||||
server_info["error"] = f"Configuration error: {e}"
|
||||
except ConnectionError as e:
|
||||
# Network connection and timeout issues
|
||||
logger.error(f"Connection error for server {server_name}: {e}")
|
||||
await logger.aerror(f"Connection error for server {server_name}: {e}")
|
||||
server_info["error"] = f"Connection failed: {e}"
|
||||
except (TimeoutError, asyncio.TimeoutError) as e:
|
||||
# Timeout errors
|
||||
logger.error(f"Timeout error for server {server_name}: {e}")
|
||||
await logger.aerror(f"Timeout error for server {server_name}: {e}")
|
||||
server_info["error"] = "Timeout when checking server tools"
|
||||
except OSError as e:
|
||||
# System-level errors (process execution, file access)
|
||||
logger.error(f"System error for server {server_name}: {e}")
|
||||
await logger.aerror(f"System error for server {server_name}: {e}")
|
||||
server_info["error"] = f"System error: {e}"
|
||||
except (KeyError, TypeError) as e:
|
||||
# Data parsing and access errors
|
||||
logger.error(f"Data error for server {server_name}: {e}")
|
||||
await logger.aerror(f"Data error for server {server_name}: {e}")
|
||||
server_info["error"] = f"Configuration data error: {e}"
|
||||
except (RuntimeError, ProcessLookupError, PermissionError) as e:
|
||||
# Runtime and process-related errors
|
||||
logger.error(f"Runtime error for server {server_name}: {e}")
|
||||
await logger.aerror(f"Runtime error for server {server_name}: {e}")
|
||||
server_info["error"] = f"Runtime error: {e}"
|
||||
except Exception as e: # noqa: BLE001
|
||||
# Generic catch-all for other exceptions (including ExceptionGroup)
|
||||
|
|
@ -168,15 +169,15 @@ async def get_servers(
|
|||
# Extract the first underlying exception for a more meaningful error message
|
||||
underlying_error = e.exceptions[0]
|
||||
if hasattr(underlying_error, "exceptions"):
|
||||
logger.error(
|
||||
await logger.aerror(
|
||||
f"Error checking server {server_name}: {underlying_error}, {underlying_error.exceptions}"
|
||||
)
|
||||
underlying_error = underlying_error.exceptions[0]
|
||||
else:
|
||||
logger.exception(f"Error checking server {server_name}: {underlying_error}")
|
||||
await logger.aexception(f"Error checking server {server_name}: {underlying_error}")
|
||||
server_info["error"] = f"Error loading server: {underlying_error}"
|
||||
else:
|
||||
logger.exception(f"Error checking server {server_name}: {e}")
|
||||
await logger.aexception(f"Error checking server {server_name}: {e}")
|
||||
server_info["error"] = f"Error loading server: {e}"
|
||||
return server_info
|
||||
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -1,4 +1,10 @@
|
|||
import signal
|
||||
import sys
|
||||
import traceback
|
||||
from contextlib import suppress
|
||||
|
||||
from docling_core.types.doc import DoclingDocument
|
||||
from loguru import logger
|
||||
|
||||
from langflow.schema.data import Data
|
||||
from langflow.schema.dataframe import DataFrame
|
||||
|
|
@ -49,3 +55,191 @@ def extract_docling_documents(data_inputs: Data | list[Data] | DataFrame, doc_ke
|
|||
msg = f"Invalid input type in collection: {e}"
|
||||
raise TypeError(msg) from e
|
||||
return documents
|
||||
|
||||
|
||||
def docling_worker(file_paths: list[str], queue, pipeline: str, ocr_engine: str):
|
||||
"""Worker function for processing files with Docling in a separate process."""
|
||||
# Signal handling for graceful shutdown
|
||||
shutdown_requested = False
|
||||
|
||||
def signal_handler(signum: int, frame) -> None: # noqa: ARG001
|
||||
"""Handle shutdown signals gracefully."""
|
||||
nonlocal shutdown_requested
|
||||
signal_names: dict[int, str] = {signal.SIGTERM: "SIGTERM", signal.SIGINT: "SIGINT"}
|
||||
signal_name = signal_names.get(signum, f"signal {signum}")
|
||||
|
||||
logger.debug(f"Docling worker received {signal_name}, initiating graceful shutdown...")
|
||||
shutdown_requested = True
|
||||
|
||||
# Send shutdown notification to parent process
|
||||
with suppress(Exception):
|
||||
queue.put({"error": f"Worker interrupted by {signal_name}", "shutdown": True})
|
||||
|
||||
# Exit gracefully
|
||||
sys.exit(0)
|
||||
|
||||
def check_shutdown() -> None:
|
||||
"""Check if shutdown was requested and exit if so."""
|
||||
if shutdown_requested:
|
||||
logger.info("Shutdown requested, exiting worker...")
|
||||
|
||||
with suppress(Exception):
|
||||
queue.put({"error": "Worker shutdown requested", "shutdown": True})
|
||||
|
||||
sys.exit(0)
|
||||
|
||||
# Register signal handlers early
|
||||
try:
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
logger.debug("Signal handlers registered for graceful shutdown")
|
||||
except (OSError, ValueError) as e:
|
||||
# Some signals might not be available on all platforms
|
||||
logger.warning(f"Warning: Could not register signal handlers: {e}")
|
||||
|
||||
# Check for shutdown before heavy imports
|
||||
check_shutdown()
|
||||
|
||||
try:
|
||||
from docling.datamodel.base_models import ConversionStatus, InputFormat
|
||||
from docling.datamodel.pipeline_options import OcrOptions, PdfPipelineOptions, VlmPipelineOptions
|
||||
from docling.document_converter import DocumentConverter, FormatOption, PdfFormatOption
|
||||
from docling.models.factories import get_ocr_factory
|
||||
from docling.pipeline.vlm_pipeline import VlmPipeline
|
||||
|
||||
# Check for shutdown after imports
|
||||
check_shutdown()
|
||||
logger.debug("Docling dependencies loaded successfully")
|
||||
|
||||
except ModuleNotFoundError:
|
||||
msg = (
|
||||
"Docling is an optional dependency of Langflow. "
|
||||
"Install with `uv pip install 'langflow[docling]'` "
|
||||
"or refer to the documentation"
|
||||
)
|
||||
queue.put({"error": msg})
|
||||
return
|
||||
except ImportError as e:
|
||||
# A different import failed (e.g., a transitive dependency); preserve details.
|
||||
queue.put({"error": f"Failed to import a Docling dependency: {e}"})
|
||||
return
|
||||
except KeyboardInterrupt:
|
||||
logger.warning("KeyboardInterrupt during imports, exiting...")
|
||||
queue.put({"error": "Worker interrupted during imports", "shutdown": True})
|
||||
return
|
||||
|
||||
# Configure the standard PDF pipeline
|
||||
def _get_standard_opts() -> PdfPipelineOptions:
|
||||
check_shutdown() # Check before heavy operations
|
||||
|
||||
pipeline_options = PdfPipelineOptions()
|
||||
pipeline_options.do_ocr = ocr_engine != ""
|
||||
if pipeline_options.do_ocr:
|
||||
ocr_factory = get_ocr_factory(
|
||||
allow_external_plugins=False,
|
||||
)
|
||||
|
||||
ocr_options: OcrOptions = ocr_factory.create_options(
|
||||
kind=ocr_engine,
|
||||
)
|
||||
pipeline_options.ocr_options = ocr_options
|
||||
return pipeline_options
|
||||
|
||||
# Configure the VLM pipeline
|
||||
def _get_vlm_opts() -> VlmPipelineOptions:
|
||||
check_shutdown() # Check before heavy operations
|
||||
return VlmPipelineOptions()
|
||||
|
||||
# Configure the main format options and create the DocumentConverter()
|
||||
def _get_converter() -> DocumentConverter:
|
||||
check_shutdown() # Check before heavy operations
|
||||
|
||||
if pipeline == "standard":
|
||||
pdf_format_option = PdfFormatOption(
|
||||
pipeline_options=_get_standard_opts(),
|
||||
)
|
||||
elif pipeline == "vlm":
|
||||
pdf_format_option = PdfFormatOption(pipeline_cls=VlmPipeline, pipeline_options=_get_vlm_opts())
|
||||
else:
|
||||
msg = f"Unknown pipeline: {pipeline!r}"
|
||||
raise ValueError(msg)
|
||||
|
||||
format_options: dict[InputFormat, FormatOption] = {
|
||||
InputFormat.PDF: pdf_format_option,
|
||||
InputFormat.IMAGE: pdf_format_option,
|
||||
}
|
||||
|
||||
return DocumentConverter(format_options=format_options)
|
||||
|
||||
try:
|
||||
# Check for shutdown before creating converter (can be slow)
|
||||
check_shutdown()
|
||||
logger.info(f"Initializing {pipeline} pipeline with OCR: {ocr_engine or 'disabled'}")
|
||||
|
||||
converter = _get_converter()
|
||||
|
||||
# Check for shutdown before processing files
|
||||
check_shutdown()
|
||||
logger.info(f"Starting to process {len(file_paths)} files...")
|
||||
|
||||
# Process files with periodic shutdown checks
|
||||
results = []
|
||||
for i, file_path in enumerate(file_paths):
|
||||
# Check for shutdown before processing each file
|
||||
check_shutdown()
|
||||
|
||||
logger.debug(f"Processing file {i + 1}/{len(file_paths)}: {file_path}")
|
||||
|
||||
try:
|
||||
# Process single file (we can't easily interrupt convert_all)
|
||||
single_result = converter.convert_all([file_path])
|
||||
results.extend(single_result)
|
||||
|
||||
# Check for shutdown after each file
|
||||
check_shutdown()
|
||||
|
||||
except (OSError, ValueError, RuntimeError, ImportError) as file_error:
|
||||
# Handle specific file processing errors
|
||||
logger.error(f"Error processing file {file_path}: {file_error}")
|
||||
# Continue with other files, but check for shutdown
|
||||
check_shutdown()
|
||||
except Exception as file_error: # noqa: BLE001
|
||||
# Catch any other unexpected errors to prevent worker crash
|
||||
logger.error(f"Unexpected error processing file {file_path}: {file_error}")
|
||||
# Continue with other files, but check for shutdown
|
||||
check_shutdown()
|
||||
|
||||
# Final shutdown check before sending results
|
||||
check_shutdown()
|
||||
|
||||
# Process the results while maintaining the original structure
|
||||
processed_data = [
|
||||
{"document": res.document, "file_path": str(res.input.file), "status": res.status.name}
|
||||
if res.status == ConversionStatus.SUCCESS
|
||||
else None
|
||||
for res in results
|
||||
]
|
||||
|
||||
logger.info(f"Successfully processed {len([d for d in processed_data if d])} files")
|
||||
queue.put(processed_data)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
logger.warning("KeyboardInterrupt during processing, exiting gracefully...")
|
||||
queue.put({"error": "Worker interrupted during processing", "shutdown": True})
|
||||
return
|
||||
except Exception as e: # noqa: BLE001
|
||||
if shutdown_requested:
|
||||
logger.exception("Exception occurred during shutdown, exiting...")
|
||||
return
|
||||
|
||||
# Send any processing error to the main process with traceback
|
||||
error_info = {"error": str(e), "traceback": traceback.format_exc()}
|
||||
logger.error(f"Error in worker: {error_info}")
|
||||
queue.put(error_info)
|
||||
finally:
|
||||
logger.info("Docling worker finishing...")
|
||||
# Ensure we don't leave any hanging processes
|
||||
if shutdown_requested:
|
||||
logger.debug("Worker shutdown completed")
|
||||
else:
|
||||
logger.debug("Worker completed normally")
|
||||
|
|
|
|||
|
|
@ -2,10 +2,10 @@ import concurrent.futures
|
|||
import json
|
||||
|
||||
import httpx
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, SecretStr
|
||||
|
||||
from langflow.field_typing import Embeddings
|
||||
from langflow.logging.logger import logger
|
||||
|
||||
|
||||
class AIMLEmbeddingsImpl(BaseModel, Embeddings):
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
from loguru import logger
|
||||
|
||||
from langflow.graph.schema import ResultData, RunOutputs
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
from langflow.schema.message import Message
|
||||
|
||||
|
|
|
|||
|
|
@ -1 +0,0 @@
|
|||
# noqa: A005
|
||||
|
|
@ -0,0 +1,137 @@
|
|||
import math
|
||||
from collections import Counter
|
||||
from pathlib import Path
|
||||
from uuid import UUID
|
||||
|
||||
from langflow.services.database.models.user.crud import get_user_by_id
|
||||
from langflow.services.deps import session_scope
|
||||
|
||||
|
||||
def compute_tfidf(documents: list[str], query_terms: list[str]) -> list[float]:
|
||||
"""Compute TF-IDF scores for query terms across a collection of documents.
|
||||
|
||||
Args:
|
||||
documents: List of document strings
|
||||
query_terms: List of query terms to score
|
||||
|
||||
Returns:
|
||||
List of TF-IDF scores for each document
|
||||
"""
|
||||
# Tokenize documents (simple whitespace splitting)
|
||||
tokenized_docs = [doc.lower().split() for doc in documents]
|
||||
n_docs = len(documents)
|
||||
|
||||
# Calculate document frequency for each term
|
||||
document_frequencies = {}
|
||||
for term in query_terms:
|
||||
document_frequencies[term] = sum(1 for doc in tokenized_docs if term.lower() in doc)
|
||||
|
||||
scores = []
|
||||
|
||||
for doc_tokens in tokenized_docs:
|
||||
doc_score = 0.0
|
||||
doc_length = len(doc_tokens)
|
||||
term_counts = Counter(doc_tokens)
|
||||
|
||||
for term in query_terms:
|
||||
term_lower = term.lower()
|
||||
|
||||
# Term frequency (TF)
|
||||
tf = term_counts[term_lower] / doc_length if doc_length > 0 else 0
|
||||
|
||||
# Inverse document frequency (IDF)
|
||||
idf = math.log(n_docs / document_frequencies[term]) if document_frequencies[term] > 0 else 0
|
||||
|
||||
# TF-IDF score
|
||||
doc_score += tf * idf
|
||||
|
||||
scores.append(doc_score)
|
||||
|
||||
return scores
|
||||
|
||||
|
||||
def compute_bm25(documents: list[str], query_terms: list[str], k1: float = 1.2, b: float = 0.75) -> list[float]:
|
||||
"""Compute BM25 scores for query terms across a collection of documents.
|
||||
|
||||
Args:
|
||||
documents: List of document strings
|
||||
query_terms: List of query terms to score
|
||||
k1: Controls term frequency scaling (default: 1.2)
|
||||
b: Controls document length normalization (default: 0.75)
|
||||
|
||||
Returns:
|
||||
List of BM25 scores for each document
|
||||
"""
|
||||
# Tokenize documents
|
||||
tokenized_docs = [doc.lower().split() for doc in documents]
|
||||
n_docs = len(documents)
|
||||
|
||||
# Calculate average document length
|
||||
avg_doc_length = sum(len(doc) for doc in tokenized_docs) / n_docs if n_docs > 0 else 0
|
||||
|
||||
# Handle edge case where all documents are empty
|
||||
if avg_doc_length == 0:
|
||||
return [0.0] * n_docs
|
||||
|
||||
# Calculate document frequency for each term
|
||||
document_frequencies = {}
|
||||
for term in query_terms:
|
||||
document_frequencies[term] = sum(1 for doc in tokenized_docs if term.lower() in doc)
|
||||
|
||||
scores = []
|
||||
|
||||
for doc_tokens in tokenized_docs:
|
||||
doc_score = 0.0
|
||||
doc_length = len(doc_tokens)
|
||||
term_counts = Counter(doc_tokens)
|
||||
|
||||
for term in query_terms:
|
||||
term_lower = term.lower()
|
||||
|
||||
# Term frequency in document
|
||||
tf = term_counts[term_lower]
|
||||
|
||||
# Inverse document frequency (IDF)
|
||||
# Use standard BM25 IDF formula that ensures non-negative values
|
||||
idf = math.log(n_docs / document_frequencies[term]) if document_frequencies[term] > 0 else 0
|
||||
|
||||
# BM25 score calculation
|
||||
numerator = tf * (k1 + 1)
|
||||
denominator = tf + k1 * (1 - b + b * (doc_length / avg_doc_length))
|
||||
|
||||
# Handle division by zero when tf=0 and k1=0
|
||||
term_score = 0 if denominator == 0 else idf * (numerator / denominator)
|
||||
|
||||
doc_score += term_score
|
||||
|
||||
scores.append(doc_score)
|
||||
|
||||
return scores
|
||||
|
||||
|
||||
async def get_knowledge_bases(kb_root: Path, user_id: UUID | str) -> list[str]:
|
||||
"""Retrieve a list of available knowledge bases.
|
||||
|
||||
Returns:
|
||||
A list of knowledge base names.
|
||||
"""
|
||||
if not kb_root.exists():
|
||||
return []
|
||||
|
||||
# Get the current user
|
||||
async with session_scope() as db:
|
||||
if not user_id:
|
||||
msg = "User ID is required for fetching knowledge bases."
|
||||
raise ValueError(msg)
|
||||
user_id = UUID(user_id) if isinstance(user_id, str) else user_id
|
||||
current_user = await get_user_by_id(db, user_id)
|
||||
if not current_user:
|
||||
msg = f"User with ID {user_id} not found."
|
||||
raise ValueError(msg)
|
||||
kb_user = current_user.username
|
||||
kb_path = kb_root / kb_user
|
||||
|
||||
if not kb_path.exists():
|
||||
return []
|
||||
|
||||
return [str(d.name) for d in kb_path.iterdir() if not d.name.startswith(".") and d.is_dir()]
|
||||
|
|
@ -2,7 +2,8 @@ from functools import lru_cache
|
|||
from typing import Any
|
||||
|
||||
import httpx
|
||||
from loguru import logger
|
||||
|
||||
from langflow.logging.logger import logger
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
|
|
|
|||
|
|
@ -15,12 +15,12 @@ import httpx
|
|||
from anyio import ClosedResourceError
|
||||
from httpx import codes as httpx_codes
|
||||
from langchain_core.tools import StructuredTool
|
||||
from loguru import logger
|
||||
from mcp import ClientSession
|
||||
from mcp.shared.exceptions import McpError
|
||||
from pydantic import BaseModel, Field, create_model
|
||||
from sqlmodel import select
|
||||
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.services.database.models.flow.model import Flow
|
||||
from langflow.services.deps import get_settings_service
|
||||
|
||||
|
|
@ -214,7 +214,7 @@ def create_tool_coroutine(tool_name: str, arg_schema: type[BaseModel], client) -
|
|||
try:
|
||||
return await client.run_tool(tool_name, arguments=validated.model_dump())
|
||||
except Exception as e:
|
||||
logger.error(f"Tool '{tool_name}' execution failed: {e}")
|
||||
await logger.aerror(f"Tool '{tool_name}' execution failed: {e}")
|
||||
# Re-raise with more context
|
||||
msg = f"Tool '{tool_name}' execution failed: {e}"
|
||||
raise ValueError(msg) from e
|
||||
|
|
@ -264,7 +264,7 @@ def get_unique_name(base_name, max_length, existing_names):
|
|||
i += 1
|
||||
|
||||
|
||||
async def get_flow_snake_case(flow_name: str, user_id: str, session, is_action: bool | None = None) -> Flow | None:
|
||||
async def get_flow_snake_case(flow_name: str, user_id: str, session, *, is_action: bool | None = None) -> Flow | None:
|
||||
uuid_user_id = UUID(user_id) if isinstance(user_id, str) else user_id
|
||||
stmt = select(Flow).where(Flow.user_id == uuid_user_id).where(Flow.is_component == False) # noqa: E712
|
||||
flows = (await session.exec(stmt)).all()
|
||||
|
|
@ -506,7 +506,7 @@ class MCPSessionManager:
|
|||
break
|
||||
except (RuntimeError, KeyError, ClosedResourceError, ValueError, asyncio.TimeoutError) as e:
|
||||
# Handle common recoverable errors without stopping the cleanup loop
|
||||
logger.warning(f"Error in periodic cleanup: {e}")
|
||||
await logger.awarning(f"Error in periodic cleanup: {e}")
|
||||
|
||||
async def _cleanup_idle_sessions(self):
|
||||
"""Clean up sessions that have been idle for too long."""
|
||||
|
|
@ -523,7 +523,7 @@ class MCPSessionManager:
|
|||
|
||||
# Clean up idle sessions
|
||||
for session_id in sessions_to_remove:
|
||||
logger.info(f"Cleaning up idle session {session_id} for server {server_key}")
|
||||
await logger.ainfo(f"Cleaning up idle session {session_id} for server {server_key}")
|
||||
await self._cleanup_session_by_id(server_key, session_id)
|
||||
|
||||
# Remove server entry if no sessions left
|
||||
|
|
@ -561,7 +561,7 @@ class MCPSessionManager:
|
|||
# Use a shorter timeout for the connectivity test to fail fast
|
||||
response = await asyncio.wait_for(session.list_tools(), timeout=3.0)
|
||||
except (asyncio.TimeoutError, ConnectionError, OSError, ValueError) as e:
|
||||
logger.debug(f"Session connectivity test failed (standard error): {e}")
|
||||
await logger.adebug(f"Session connectivity test failed (standard error): {e}")
|
||||
return False
|
||||
except Exception as e:
|
||||
# Handle MCP-specific errors that might not be in the standard list
|
||||
|
|
@ -574,27 +574,27 @@ class MCPSessionManager:
|
|||
or "Transport closed" in error_str
|
||||
or "Stream closed" in error_str
|
||||
):
|
||||
logger.debug(f"Session connectivity test failed (MCP connection error): {e}")
|
||||
await logger.adebug(f"Session connectivity test failed (MCP connection error): {e}")
|
||||
return False
|
||||
# Re-raise unexpected errors
|
||||
logger.warning(f"Unexpected error in connectivity test: {e}")
|
||||
await logger.awarning(f"Unexpected error in connectivity test: {e}")
|
||||
raise
|
||||
else:
|
||||
# Validate that we got a meaningful response
|
||||
if response is None:
|
||||
logger.debug("Session connectivity test failed: received None response")
|
||||
await logger.adebug("Session connectivity test failed: received None response")
|
||||
return False
|
||||
try:
|
||||
# Check if we can access the tools list (even if empty)
|
||||
tools = getattr(response, "tools", None)
|
||||
if tools is None:
|
||||
logger.debug("Session connectivity test failed: no tools attribute in response")
|
||||
await logger.adebug("Session connectivity test failed: no tools attribute in response")
|
||||
return False
|
||||
except (AttributeError, TypeError) as e:
|
||||
logger.debug(f"Session connectivity test failed while validating response: {e}")
|
||||
await logger.adebug(f"Session connectivity test failed while validating response: {e}")
|
||||
return False
|
||||
else:
|
||||
logger.debug(f"Session connectivity test passed: found {len(tools)} tools")
|
||||
await logger.adebug(f"Session connectivity test passed: found {len(tools)} tools")
|
||||
return True
|
||||
|
||||
async def get_session(self, context_id: str, connection_params, transport_type: str):
|
||||
|
|
@ -625,32 +625,32 @@ class MCPSessionManager:
|
|||
|
||||
# Quick health check
|
||||
if await self._validate_session_connectivity(session):
|
||||
logger.debug(f"Reusing existing session {session_id} for server {server_key}")
|
||||
await logger.adebug(f"Reusing existing session {session_id} for server {server_key}")
|
||||
# record mapping & bump ref-count for backwards compatibility
|
||||
self._context_to_session[context_id] = (server_key, session_id)
|
||||
self._session_refcount[(server_key, session_id)] = (
|
||||
self._session_refcount.get((server_key, session_id), 0) + 1
|
||||
)
|
||||
return session
|
||||
logger.info(f"Session {session_id} for server {server_key} failed health check, cleaning up")
|
||||
await logger.ainfo(f"Session {session_id} for server {server_key} failed health check, cleaning up")
|
||||
await self._cleanup_session_by_id(server_key, session_id)
|
||||
else:
|
||||
# Task is done, clean up
|
||||
logger.info(f"Session {session_id} for server {server_key} task is done, cleaning up")
|
||||
await logger.ainfo(f"Session {session_id} for server {server_key} task is done, cleaning up")
|
||||
await self._cleanup_session_by_id(server_key, session_id)
|
||||
|
||||
# Check if we've reached the maximum number of sessions for this server
|
||||
if len(sessions) >= MAX_SESSIONS_PER_SERVER:
|
||||
# Remove the oldest session
|
||||
oldest_session_id = min(sessions.keys(), key=lambda x: sessions[x]["last_used"])
|
||||
logger.info(
|
||||
await logger.ainfo(
|
||||
f"Maximum sessions reached for server {server_key}, removing oldest session {oldest_session_id}"
|
||||
)
|
||||
await self._cleanup_session_by_id(server_key, oldest_session_id)
|
||||
|
||||
# Create new session
|
||||
session_id = f"{server_key}_{len(sessions)}"
|
||||
logger.info(f"Creating new session {session_id} for server {server_key}")
|
||||
await logger.ainfo(f"Creating new session {session_id} for server {server_key}")
|
||||
|
||||
if transport_type == "stdio":
|
||||
session, task = await self._create_stdio_session(session_id, connection_params)
|
||||
|
|
@ -700,7 +700,7 @@ class MCPSessionManager:
|
|||
try:
|
||||
await event.wait()
|
||||
except asyncio.CancelledError:
|
||||
logger.info(f"Session {session_id} is shutting down")
|
||||
await logger.ainfo(f"Session {session_id} is shutting down")
|
||||
except Exception as e: # noqa: BLE001
|
||||
if not session_future.done():
|
||||
session_future.set_exception(e)
|
||||
|
|
@ -723,7 +723,7 @@ class MCPSessionManager:
|
|||
await task
|
||||
self._background_tasks.discard(task)
|
||||
msg = f"Timeout waiting for STDIO session {session_id} to initialize"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
raise ValueError(msg) from timeout_err
|
||||
|
||||
return session, task
|
||||
|
|
@ -759,7 +759,7 @@ class MCPSessionManager:
|
|||
try:
|
||||
await event.wait()
|
||||
except asyncio.CancelledError:
|
||||
logger.info(f"Session {session_id} is shutting down")
|
||||
await logger.ainfo(f"Session {session_id} is shutting down")
|
||||
except Exception as e: # noqa: BLE001
|
||||
if not session_future.done():
|
||||
session_future.set_exception(e)
|
||||
|
|
@ -782,7 +782,7 @@ class MCPSessionManager:
|
|||
await task
|
||||
self._background_tasks.discard(task)
|
||||
msg = f"Timeout waiting for SSE session {session_id} to initialize"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
raise ValueError(msg) from timeout_err
|
||||
|
||||
return session, task
|
||||
|
|
@ -813,9 +813,9 @@ class MCPSessionManager:
|
|||
if hasattr(session, "aclose"):
|
||||
try:
|
||||
await session.aclose()
|
||||
logger.debug("Successfully closed session %s using aclose()", session_id)
|
||||
await logger.adebug("Successfully closed session %s using aclose()", session_id)
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.debug("Error closing session %s with aclose(): %s", session_id, e)
|
||||
await logger.adebug("Error closing session %s with aclose(): %s", session_id, e)
|
||||
|
||||
# If no aclose, try regular close method
|
||||
elif hasattr(session, "close"):
|
||||
|
|
@ -824,18 +824,20 @@ class MCPSessionManager:
|
|||
if inspect.iscoroutinefunction(session.close):
|
||||
# It's an async method
|
||||
await session.close()
|
||||
logger.debug("Successfully closed session %s using async close()", session_id)
|
||||
await logger.adebug("Successfully closed session %s using async close()", session_id)
|
||||
else:
|
||||
# Try calling it and check if result is awaitable
|
||||
close_result = session.close()
|
||||
if inspect.isawaitable(close_result):
|
||||
await close_result
|
||||
logger.debug("Successfully closed session %s using awaitable close()", session_id)
|
||||
await logger.adebug(
|
||||
"Successfully closed session %s using awaitable close()", session_id
|
||||
)
|
||||
else:
|
||||
# It's a synchronous close
|
||||
logger.debug("Successfully closed session %s using sync close()", session_id)
|
||||
await logger.adebug("Successfully closed session %s using sync close()", session_id)
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.debug("Error closing session %s with close(): %s", session_id, e)
|
||||
await logger.adebug("Error closing session %s with close(): %s", session_id, e)
|
||||
|
||||
# Cancel the background task which will properly close the session
|
||||
if "task" in session_info:
|
||||
|
|
@ -845,9 +847,9 @@ class MCPSessionManager:
|
|||
try:
|
||||
await task
|
||||
except asyncio.CancelledError:
|
||||
logger.info(f"Cancelled task for session {session_id}")
|
||||
await logger.ainfo(f"Cancelled task for session {session_id}")
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.warning(f"Error cleaning up session {session_id}: {e}")
|
||||
await logger.awarning(f"Error cleaning up session {session_id}: {e}")
|
||||
finally:
|
||||
# Remove from sessions dict
|
||||
del sessions[session_id]
|
||||
|
|
@ -900,7 +902,7 @@ class MCPSessionManager:
|
|||
"""
|
||||
mapping = self._context_to_session.get(context_id)
|
||||
if not mapping:
|
||||
logger.debug(f"No session mapping found for context_id {context_id}")
|
||||
await logger.adebug(f"No session mapping found for context_id {context_id}")
|
||||
return
|
||||
|
||||
server_key, session_id = mapping
|
||||
|
|
@ -1031,7 +1033,7 @@ class MCPStdioClient:
|
|||
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
logger.debug(f"Attempting to run tool '{tool_name}' (attempt {attempt + 1}/{max_retries})")
|
||||
await logger.adebug(f"Attempting to run tool '{tool_name}' (attempt {attempt + 1}/{max_retries})")
|
||||
# Get or create persistent session
|
||||
session = await self._get_or_create_session()
|
||||
|
||||
|
|
@ -1041,7 +1043,7 @@ class MCPStdioClient:
|
|||
)
|
||||
except Exception as e:
|
||||
current_error_type = type(e).__name__
|
||||
logger.warning(f"Tool '{tool_name}' failed on attempt {attempt + 1}: {current_error_type} - {e}")
|
||||
await logger.awarning(f"Tool '{tool_name}' failed on attempt {attempt + 1}: {current_error_type} - {e}")
|
||||
|
||||
# Import specific MCP error types for detection
|
||||
try:
|
||||
|
|
@ -1056,14 +1058,14 @@ class MCPStdioClient:
|
|||
|
||||
# If we're getting the same error type repeatedly, don't retry
|
||||
if last_error_type == current_error_type and attempt > 0:
|
||||
logger.error(f"Repeated {current_error_type} error for tool '{tool_name}', not retrying")
|
||||
await logger.aerror(f"Repeated {current_error_type} error for tool '{tool_name}', not retrying")
|
||||
break
|
||||
|
||||
last_error_type = current_error_type
|
||||
|
||||
# If it's a connection error (ClosedResourceError or MCP connection closed) and we have retries left
|
||||
if (is_closed_resource_error or is_mcp_connection_error) and attempt < max_retries - 1:
|
||||
logger.warning(
|
||||
await logger.awarning(
|
||||
f"MCP session connection issue for tool '{tool_name}', retrying with fresh session..."
|
||||
)
|
||||
# Clean up the dead session
|
||||
|
|
@ -1076,7 +1078,7 @@ class MCPStdioClient:
|
|||
|
||||
# If it's a timeout error and we have retries left, try once more
|
||||
if is_timeout_error and attempt < max_retries - 1:
|
||||
logger.warning(f"Tool '{tool_name}' timed out, retrying...")
|
||||
await logger.awarning(f"Tool '{tool_name}' timed out, retrying...")
|
||||
# Don't clean up session for timeouts, might just be a slow response
|
||||
await asyncio.sleep(1.0)
|
||||
continue
|
||||
|
|
@ -1089,7 +1091,7 @@ class MCPStdioClient:
|
|||
or is_timeout_error
|
||||
):
|
||||
msg = f"Failed to run tool '{tool_name}' after {attempt + 1} attempts: {e}"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
# Clean up failed session from cache
|
||||
if self._session_context and self._component_cache:
|
||||
cache_key = f"mcp_session_stdio_{self._session_context}"
|
||||
|
|
@ -1099,12 +1101,12 @@ class MCPStdioClient:
|
|||
# Re-raise unexpected errors
|
||||
raise
|
||||
else:
|
||||
logger.debug(f"Tool '{tool_name}' completed successfully")
|
||||
await logger.adebug(f"Tool '{tool_name}' completed successfully")
|
||||
return result
|
||||
|
||||
# This should never be reached due to the exception handling above
|
||||
msg = f"Failed to run tool '{tool_name}': Maximum retries exceeded with repeated {last_error_type} errors"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
raise ValueError(msg)
|
||||
|
||||
async def disconnect(self):
|
||||
|
|
@ -1213,7 +1215,7 @@ class MCPSseClient:
|
|||
return response.headers.get("Location", url)
|
||||
# Don't treat 404 as an error here - let the main connection handle it
|
||||
except (httpx.RequestError, httpx.HTTPError) as e:
|
||||
logger.warning(f"Error checking redirects: {e}")
|
||||
await logger.awarning(f"Error checking redirects: {e}")
|
||||
return url
|
||||
|
||||
async def _connect_to_server(
|
||||
|
|
@ -1336,7 +1338,7 @@ class MCPSseClient:
|
|||
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
logger.debug(f"Attempting to run tool '{tool_name}' (attempt {attempt + 1}/{max_retries})")
|
||||
await logger.adebug(f"Attempting to run tool '{tool_name}' (attempt {attempt + 1}/{max_retries})")
|
||||
# Get or create persistent session
|
||||
session = await self._get_or_create_session()
|
||||
|
||||
|
|
@ -1349,7 +1351,7 @@ class MCPSseClient:
|
|||
)
|
||||
except Exception as e:
|
||||
current_error_type = type(e).__name__
|
||||
logger.warning(f"Tool '{tool_name}' failed on attempt {attempt + 1}: {current_error_type} - {e}")
|
||||
await logger.awarning(f"Tool '{tool_name}' failed on attempt {attempt + 1}: {current_error_type} - {e}")
|
||||
|
||||
# Import specific MCP error types for detection
|
||||
try:
|
||||
|
|
@ -1367,14 +1369,14 @@ class MCPSseClient:
|
|||
|
||||
# If we're getting the same error type repeatedly, don't retry
|
||||
if last_error_type == current_error_type and attempt > 0:
|
||||
logger.error(f"Repeated {current_error_type} error for tool '{tool_name}', not retrying")
|
||||
await logger.aerror(f"Repeated {current_error_type} error for tool '{tool_name}', not retrying")
|
||||
break
|
||||
|
||||
last_error_type = current_error_type
|
||||
|
||||
# If it's a connection error (ClosedResourceError or MCP connection closed) and we have retries left
|
||||
if (is_closed_resource_error or is_mcp_connection_error) and attempt < max_retries - 1:
|
||||
logger.warning(
|
||||
await logger.awarning(
|
||||
f"MCP session connection issue for tool '{tool_name}', retrying with fresh session..."
|
||||
)
|
||||
# Clean up the dead session
|
||||
|
|
@ -1387,7 +1389,7 @@ class MCPSseClient:
|
|||
|
||||
# If it's a timeout error and we have retries left, try once more
|
||||
if is_timeout_error and attempt < max_retries - 1:
|
||||
logger.warning(f"Tool '{tool_name}' timed out, retrying...")
|
||||
await logger.awarning(f"Tool '{tool_name}' timed out, retrying...")
|
||||
# Don't clean up session for timeouts, might just be a slow response
|
||||
await asyncio.sleep(1.0)
|
||||
continue
|
||||
|
|
@ -1400,7 +1402,7 @@ class MCPSseClient:
|
|||
or is_timeout_error
|
||||
):
|
||||
msg = f"Failed to run tool '{tool_name}' after {attempt + 1} attempts: {e}"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
# Clean up failed session from cache
|
||||
if self._session_context and self._component_cache:
|
||||
cache_key = f"mcp_session_sse_{self._session_context}"
|
||||
|
|
@ -1410,12 +1412,12 @@ class MCPSseClient:
|
|||
# Re-raise unexpected errors
|
||||
raise
|
||||
else:
|
||||
logger.debug(f"Tool '{tool_name}' completed successfully")
|
||||
await logger.adebug(f"Tool '{tool_name}' completed successfully")
|
||||
return result
|
||||
|
||||
# This should never be reached due to the exception handling above
|
||||
msg = f"Failed to run tool '{tool_name}': Maximum retries exceeded with repeated {last_error_type} errors"
|
||||
logger.error(msg)
|
||||
await logger.aerror(msg)
|
||||
raise ValueError(msg)
|
||||
|
||||
async def disconnect(self):
|
||||
|
|
|
|||
|
|
@ -252,7 +252,7 @@ class LCModelComponent(Component):
|
|||
if stream:
|
||||
lf_message, result = await self._handle_stream(runnable, inputs)
|
||||
else:
|
||||
message = runnable.invoke(inputs)
|
||||
message = await runnable.ainvoke(inputs)
|
||||
result = message.content if hasattr(message, "content") else message
|
||||
if isinstance(message, AIMessage):
|
||||
status_message = self.build_status_message(message)
|
||||
|
|
@ -288,7 +288,7 @@ class LCModelComponent(Component):
|
|||
else:
|
||||
session_id = None
|
||||
model_message = Message(
|
||||
text=runnable.stream(inputs),
|
||||
text=runnable.astream(inputs),
|
||||
sender=MESSAGE_SENDER_AI,
|
||||
sender_name="AI",
|
||||
properties={"icon": self.icon, "state": "partial"},
|
||||
|
|
@ -298,7 +298,7 @@ class LCModelComponent(Component):
|
|||
lf_message = await self.send_message(model_message)
|
||||
result = lf_message.text
|
||||
else:
|
||||
message = runnable.invoke(inputs)
|
||||
message = await runnable.ainvoke(inputs)
|
||||
result = message.content if hasattr(message, "content") else message
|
||||
return lf_message, result
|
||||
|
||||
|
|
|
|||
|
|
@ -2,6 +2,35 @@ from .model_metadata import create_model_metadata
|
|||
|
||||
# Unified model metadata - single source of truth
|
||||
OPENAI_MODELS_DETAILED = [
|
||||
# GPT-5 Series
|
||||
create_model_metadata(
|
||||
provider="OpenAI",
|
||||
name="gpt-5",
|
||||
icon="OpenAI",
|
||||
tool_calling=True,
|
||||
reasoning=True,
|
||||
),
|
||||
create_model_metadata(
|
||||
provider="OpenAI",
|
||||
name="gpt-5-mini",
|
||||
icon="OpenAI",
|
||||
tool_calling=True,
|
||||
reasoning=True,
|
||||
),
|
||||
create_model_metadata(
|
||||
provider="OpenAI",
|
||||
name="gpt-5-nano",
|
||||
icon="OpenAI",
|
||||
tool_calling=True,
|
||||
reasoning=True,
|
||||
),
|
||||
create_model_metadata(
|
||||
provider="OpenAI",
|
||||
name="gpt-5-chat-latest",
|
||||
icon="OpenAI",
|
||||
tool_calling=False,
|
||||
reasoning=True,
|
||||
),
|
||||
# Regular OpenAI Models
|
||||
create_model_metadata(provider="OpenAI", name="gpt-4o-mini", icon="OpenAI", tool_calling=True),
|
||||
create_model_metadata(provider="OpenAI", name="gpt-4o", icon="OpenAI", tool_calling=True),
|
||||
|
|
|
|||
|
|
@ -3,10 +3,10 @@ from typing import Any
|
|||
|
||||
from fastapi import HTTPException
|
||||
from langchain_core.prompts import PromptTemplate
|
||||
from loguru import logger
|
||||
|
||||
from langflow.inputs.inputs import DefaultPromptField
|
||||
from langflow.interface.utils import extract_input_variables_from_prompt
|
||||
from langflow.logging.logger import logger
|
||||
|
||||
_INVALID_CHARACTERS = {
|
||||
" ",
|
||||
|
|
|
|||
|
|
@ -3,13 +3,13 @@ from __future__ import annotations
|
|||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from langchain_core.tools import BaseTool, ToolException
|
||||
from loguru import logger
|
||||
from typing_extensions import override
|
||||
|
||||
from langflow.base.flow_processing.utils import build_data_from_result_data, format_flow_output_data
|
||||
from langflow.graph.graph.base import Graph # cannot be a part of TYPE_CHECKING # noqa: TC001
|
||||
from langflow.graph.vertex.base import Vertex # cannot be a part of TYPE_CHECKING # noqa: TC001
|
||||
from langflow.helpers.flow import build_schema_from_inputs, get_arg_names, get_flow_inputs, run_flow
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.utils.async_helpers import run_until_complete
|
||||
|
||||
if TYPE_CHECKING:
|
||||
|
|
@ -109,7 +109,7 @@ class FlowTool(BaseTool):
|
|||
try:
|
||||
run_id = self.graph.run_id if hasattr(self, "graph") and self.graph else None
|
||||
except Exception: # noqa: BLE001
|
||||
logger.opt(exception=True).warning("Failed to set run_id")
|
||||
logger.warning("Failed to set run_id", exc_info=True)
|
||||
run_id = None
|
||||
run_outputs = await run_flow(
|
||||
tweaks={key: {"input_value": value} for key, value in tweaks.items()},
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
from abc import abstractmethod
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from loguru import logger
|
||||
from typing_extensions import override
|
||||
|
||||
from langflow.custom.custom_component.component import Component, _get_component_toolkit
|
||||
|
|
@ -9,11 +8,8 @@ from langflow.field_typing import Tool
|
|||
from langflow.graph.graph.base import Graph
|
||||
from langflow.graph.vertex.base import Vertex
|
||||
from langflow.helpers.flow import get_flow_inputs
|
||||
from langflow.inputs.inputs import (
|
||||
DropdownInput,
|
||||
InputTypes,
|
||||
MessageInput,
|
||||
)
|
||||
from langflow.inputs.inputs import DropdownInput, InputTypes, MessageInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
from langflow.schema.dataframe import DataFrame
|
||||
from langflow.schema.dotdict import dotdict
|
||||
|
|
|
|||
34
src/backend/base/langflow/components/FAISS/__init__.py
Normal file
34
src/backend/base/langflow/components/FAISS/__init__.py
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from langflow.components._importing import import_mod
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .faiss import FaissVectorStoreComponent
|
||||
|
||||
_dynamic_imports = {
|
||||
"FaissVectorStoreComponent": "faiss",
|
||||
}
|
||||
|
||||
__all__ = [
|
||||
"FaissVectorStoreComponent",
|
||||
]
|
||||
|
||||
|
||||
def __getattr__(attr_name: str) -> Any:
|
||||
"""Lazily import FAISS components on attribute access."""
|
||||
if attr_name not in _dynamic_imports:
|
||||
msg = f"module '{__name__}' has no attribute '{attr_name}'"
|
||||
raise AttributeError(msg)
|
||||
try:
|
||||
result = import_mod(attr_name, _dynamic_imports[attr_name], __spec__.parent)
|
||||
except (ModuleNotFoundError, ImportError, AttributeError) as e:
|
||||
msg = f"Could not import '{attr_name}' from '{__name__}': {e}"
|
||||
raise AttributeError(msg) from e
|
||||
globals()[attr_name] = result
|
||||
return result
|
||||
|
||||
|
||||
def __dir__() -> list[str]:
|
||||
return list(__all__)
|
||||
|
|
@ -4,13 +4,13 @@ from typing import Any
|
|||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from langchain.tools import StructuredTool
|
||||
from loguru import logger
|
||||
from markdown import markdown
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langflow.base.langchain_utilities.model import LCToolComponent
|
||||
from langflow.field_typing import Tool
|
||||
from langflow.inputs.inputs import MultilineInput, SecretStrInput, StrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
MIN_ROWS_IN_TABLE = 3
|
||||
|
|
@ -84,7 +84,7 @@ class AddContentToPage(LCToolComponent):
|
|||
error_message += f" Status code: {e.response.status_code}, Response: {e.response.text}"
|
||||
return error_message
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.opt(exception=True).debug("Error adding content to Notion page")
|
||||
logger.debug("Error adding content to Notion page", exc_info=True)
|
||||
return f"Error: An unexpected error occurred while adding content to Notion page. {e}"
|
||||
|
||||
def process_node(self, node):
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
import requests
|
||||
from langchain.tools import StructuredTool
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langflow.base.langchain_utilities.model import LCToolComponent
|
||||
from langflow.field_typing import Tool
|
||||
from langflow.inputs.inputs import SecretStrInput, StrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
@ -64,5 +64,5 @@ class NotionDatabaseProperties(LCToolComponent):
|
|||
except ValueError as e:
|
||||
return f"Error parsing Notion API response: {e}"
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.opt(exception=True).debug("Error fetching Notion database properties")
|
||||
logger.debug("Error fetching Notion database properties", exc_info=True)
|
||||
return f"An unexpected error occurred: {e}"
|
||||
|
|
|
|||
|
|
@ -3,12 +3,12 @@ from typing import Any
|
|||
|
||||
import requests
|
||||
from langchain.tools import StructuredTool
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langflow.base.langchain_utilities.model import LCToolComponent
|
||||
from langflow.field_typing import Tool
|
||||
from langflow.inputs.inputs import MultilineInput, SecretStrInput, StrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
@ -118,5 +118,5 @@ class NotionListPages(LCToolComponent):
|
|||
except KeyError:
|
||||
return "Unexpected response format from Notion API"
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.opt(exception=True).debug("Error querying Notion database")
|
||||
logger.debug("Error querying Notion database", exc_info=True)
|
||||
return f"An unexpected error occurred: {e}"
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
import requests
|
||||
from langchain.tools import StructuredTool
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langflow.base.langchain_utilities.model import LCToolComponent
|
||||
from langflow.field_typing import Tool
|
||||
from langflow.inputs.inputs import SecretStrInput, StrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
@ -65,7 +65,7 @@ class NotionPageContent(LCToolComponent):
|
|||
error_message += f" Status code: {e.response.status_code}, Response: {e.response.text}"
|
||||
return error_message
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.opt(exception=True).debug("Error retrieving Notion page content")
|
||||
logger.debug("Error retrieving Notion page content", exc_info=True)
|
||||
return f"Error: An unexpected error occurred while retrieving Notion page content. {e}"
|
||||
|
||||
def parse_blocks(self, blocks: list) -> str:
|
||||
|
|
|
|||
|
|
@ -3,12 +3,12 @@ from typing import Any
|
|||
|
||||
import requests
|
||||
from langchain.tools import StructuredTool
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langflow.base.langchain_utilities.model import LCToolComponent
|
||||
from langflow.field_typing import Tool
|
||||
from langflow.inputs.inputs import MultilineInput, SecretStrInput, StrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,17 +1,9 @@
|
|||
import httpx
|
||||
from loguru import logger
|
||||
|
||||
from langflow.custom.custom_component.component import Component
|
||||
from langflow.field_typing.range_spec import RangeSpec
|
||||
from langflow.io import (
|
||||
BoolInput,
|
||||
DropdownInput,
|
||||
IntInput,
|
||||
MessageTextInput,
|
||||
MultilineInput,
|
||||
Output,
|
||||
SecretStrInput,
|
||||
)
|
||||
from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, SecretStrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import json
|
|||
import re
|
||||
|
||||
from langchain_core.tools import StructuredTool
|
||||
from pydantic import ValidationError
|
||||
|
||||
from langflow.base.agents.agent import LCToolsAgentComponent
|
||||
from langflow.base.agents.events import ExceptionWithMessageError
|
||||
|
|
@ -19,11 +20,13 @@ from langflow.components.langchain_utilities.tool_calling import ToolCallingAgen
|
|||
from langflow.custom.custom_component.component import _get_component_toolkit
|
||||
from langflow.custom.utils import update_component_build_config
|
||||
from langflow.field_typing import Tool
|
||||
from langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output
|
||||
from langflow.helpers.base_model import build_model_from_schema
|
||||
from langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output, TableInput
|
||||
from langflow.logging import logger
|
||||
from langflow.schema.data import Data
|
||||
from langflow.schema.dotdict import dotdict
|
||||
from langflow.schema.message import Message
|
||||
from langflow.schema.table import EditMode
|
||||
|
||||
|
||||
def set_advanced_true(component_input):
|
||||
|
|
@ -78,6 +81,67 @@ class AgentComponent(ToolCallingAgentComponent):
|
|||
advanced=True,
|
||||
show=True,
|
||||
),
|
||||
MultilineInput(
|
||||
name="format_instructions",
|
||||
display_name="Output Format Instructions",
|
||||
info="Generic Template for structured output formatting. Valid only with Structured response.",
|
||||
value=(
|
||||
"You are an AI that extracts structured JSON objects from unstructured text. "
|
||||
"Use a predefined schema with expected types (str, int, float, bool, dict). "
|
||||
"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. "
|
||||
"Fill missing or ambiguous values with defaults: null for missing values. "
|
||||
"Remove exact duplicates but keep variations that have different field values. "
|
||||
"Always return valid JSON in the expected format, never throw errors. "
|
||||
"If multiple objects can be extracted, return them all in the structured format."
|
||||
),
|
||||
advanced=True,
|
||||
),
|
||||
TableInput(
|
||||
name="output_schema",
|
||||
display_name="Output Schema",
|
||||
info=(
|
||||
"Schema Validation: Define the structure and data types for structured output. "
|
||||
"No validation if no output schema."
|
||||
),
|
||||
advanced=True,
|
||||
required=False,
|
||||
value=[],
|
||||
table_schema=[
|
||||
{
|
||||
"name": "name",
|
||||
"display_name": "Name",
|
||||
"type": "str",
|
||||
"description": "Specify the name of the output field.",
|
||||
"default": "field",
|
||||
"edit_mode": EditMode.INLINE,
|
||||
},
|
||||
{
|
||||
"name": "description",
|
||||
"display_name": "Description",
|
||||
"type": "str",
|
||||
"description": "Describe the purpose of the output field.",
|
||||
"default": "description of field",
|
||||
"edit_mode": EditMode.POPOVER,
|
||||
},
|
||||
{
|
||||
"name": "type",
|
||||
"display_name": "Type",
|
||||
"type": "str",
|
||||
"edit_mode": EditMode.INLINE,
|
||||
"description": ("Indicate the data type of the output field (e.g., str, int, float, bool, dict)."),
|
||||
"options": ["str", "int", "float", "bool", "dict"],
|
||||
"default": "str",
|
||||
},
|
||||
{
|
||||
"name": "multiple",
|
||||
"display_name": "As List",
|
||||
"type": "boolean",
|
||||
"description": "Set to True if this output field should be a list of the specified type.",
|
||||
"default": "False",
|
||||
"edit_mode": EditMode.INLINE,
|
||||
},
|
||||
],
|
||||
),
|
||||
*LCToolsAgentComponent._base_inputs,
|
||||
# removed memory inputs from agent component
|
||||
# *memory_inputs,
|
||||
|
|
@ -94,31 +158,33 @@ class AgentComponent(ToolCallingAgentComponent):
|
|||
Output(name="structured_response", display_name="Structured Response", method="json_response", tool_mode=False),
|
||||
]
|
||||
|
||||
async def get_agent_requirements(self):
|
||||
"""Get the agent requirements for the agent."""
|
||||
llm_model, display_name = await self.get_llm()
|
||||
if llm_model is None:
|
||||
msg = "No language model selected. Please choose a model to proceed."
|
||||
raise ValueError(msg)
|
||||
self.model_name = get_model_name(llm_model, display_name=display_name)
|
||||
|
||||
# Get memory data
|
||||
self.chat_history = await self.get_memory_data()
|
||||
if isinstance(self.chat_history, Message):
|
||||
self.chat_history = [self.chat_history]
|
||||
|
||||
# Add current date tool if enabled
|
||||
if self.add_current_date_tool:
|
||||
if not isinstance(self.tools, list): # type: ignore[has-type]
|
||||
self.tools = []
|
||||
current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)
|
||||
if not isinstance(current_date_tool, StructuredTool):
|
||||
msg = "CurrentDateComponent must be converted to a StructuredTool"
|
||||
raise TypeError(msg)
|
||||
self.tools.append(current_date_tool)
|
||||
return llm_model, self.chat_history, self.tools
|
||||
|
||||
async def message_response(self) -> Message:
|
||||
try:
|
||||
# Get LLM model and validate
|
||||
llm_model, display_name = self.get_llm()
|
||||
if llm_model is None:
|
||||
msg = "No language model selected. Please choose a model to proceed."
|
||||
raise ValueError(msg)
|
||||
self.model_name = get_model_name(llm_model, display_name=display_name)
|
||||
|
||||
# Get memory data
|
||||
self.chat_history = await self.get_memory_data()
|
||||
if isinstance(self.chat_history, Message):
|
||||
self.chat_history = [self.chat_history]
|
||||
|
||||
# Add current date tool if enabled
|
||||
if self.add_current_date_tool:
|
||||
if not isinstance(self.tools, list): # type: ignore[has-type]
|
||||
self.tools = []
|
||||
current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)
|
||||
if not isinstance(current_date_tool, StructuredTool):
|
||||
msg = "CurrentDateComponent must be converted to a StructuredTool"
|
||||
raise TypeError(msg)
|
||||
self.tools.append(current_date_tool)
|
||||
# note the tools are not required to run the agent, hence the validation removed.
|
||||
|
||||
llm_model, self.chat_history, self.tools = await self.get_agent_requirements()
|
||||
# Set up and run agent
|
||||
self.set(
|
||||
llm=llm_model,
|
||||
|
|
@ -132,52 +198,180 @@ class AgentComponent(ToolCallingAgentComponent):
|
|||
|
||||
# Store result for potential JSON output
|
||||
self._agent_result = result
|
||||
# return result
|
||||
|
||||
except (ValueError, TypeError, KeyError) as e:
|
||||
logger.error(f"{type(e).__name__}: {e!s}")
|
||||
await logger.aerror(f"{type(e).__name__}: {e!s}")
|
||||
raise
|
||||
except ExceptionWithMessageError as e:
|
||||
logger.error(f"ExceptionWithMessageError occurred: {e}")
|
||||
await logger.aerror(f"ExceptionWithMessageError occurred: {e}")
|
||||
raise
|
||||
# Avoid catching blind Exception; let truly unexpected exceptions propagate
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error: {e!s}")
|
||||
await logger.aerror(f"Unexpected error: {e!s}")
|
||||
raise
|
||||
else:
|
||||
return result
|
||||
|
||||
async def json_response(self) -> Data:
|
||||
"""Convert agent response to structured JSON Data output."""
|
||||
# Run the regular message response first to get the result
|
||||
if not hasattr(self, "_agent_result"):
|
||||
await self.message_response()
|
||||
def _preprocess_schema(self, schema):
|
||||
"""Preprocess schema to ensure correct data types for build_model_from_schema."""
|
||||
processed_schema = []
|
||||
for field in schema:
|
||||
processed_field = {
|
||||
"name": str(field.get("name", "field")),
|
||||
"type": str(field.get("type", "str")),
|
||||
"description": str(field.get("description", "")),
|
||||
"multiple": field.get("multiple", False),
|
||||
}
|
||||
# Ensure multiple is handled correctly
|
||||
if isinstance(processed_field["multiple"], str):
|
||||
processed_field["multiple"] = processed_field["multiple"].lower() in ["true", "1", "t", "y", "yes"]
|
||||
processed_schema.append(processed_field)
|
||||
return processed_schema
|
||||
|
||||
result = self._agent_result
|
||||
async def build_structured_output_base(self, content: str):
|
||||
"""Build structured output with optional BaseModel validation."""
|
||||
json_pattern = r"\{.*\}"
|
||||
schema_error_msg = "Try setting an output schema"
|
||||
|
||||
# Extract content from result
|
||||
if hasattr(result, "content"):
|
||||
content = result.content
|
||||
elif hasattr(result, "text"):
|
||||
content = result.text
|
||||
else:
|
||||
content = str(result)
|
||||
|
||||
# Try to parse as JSON
|
||||
# Try to parse content as JSON first
|
||||
json_data = None
|
||||
try:
|
||||
json_data = json.loads(content)
|
||||
return Data(data=json_data)
|
||||
except json.JSONDecodeError:
|
||||
# If it's not valid JSON, try to extract JSON from the content
|
||||
json_match = re.search(r"\{.*\}", content, re.DOTALL)
|
||||
json_match = re.search(json_pattern, content, re.DOTALL)
|
||||
if json_match:
|
||||
try:
|
||||
json_data = json.loads(json_match.group())
|
||||
return Data(data=json_data)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
return {"content": content, "error": schema_error_msg}
|
||||
else:
|
||||
return {"content": content, "error": schema_error_msg}
|
||||
|
||||
# If we can't extract JSON, return the raw content as data
|
||||
return Data(data={"content": content, "error": "Could not parse as JSON"})
|
||||
# If no output schema provided, return parsed JSON without validation
|
||||
if not hasattr(self, "output_schema") or not self.output_schema or len(self.output_schema) == 0:
|
||||
return json_data
|
||||
|
||||
# Use BaseModel validation with schema
|
||||
try:
|
||||
processed_schema = self._preprocess_schema(self.output_schema)
|
||||
output_model = build_model_from_schema(processed_schema)
|
||||
|
||||
# Validate against the schema
|
||||
if isinstance(json_data, list):
|
||||
# Multiple objects
|
||||
validated_objects = []
|
||||
for item in json_data:
|
||||
try:
|
||||
validated_obj = output_model.model_validate(item)
|
||||
validated_objects.append(validated_obj.model_dump())
|
||||
except ValidationError as e:
|
||||
await logger.aerror(f"Validation error for item: {e}")
|
||||
# Include invalid items with error info
|
||||
validated_objects.append({"data": item, "validation_error": str(e)})
|
||||
return validated_objects
|
||||
|
||||
# Single object
|
||||
try:
|
||||
validated_obj = output_model.model_validate(json_data)
|
||||
return [validated_obj.model_dump()] # Return as list for consistency
|
||||
except ValidationError as e:
|
||||
await logger.aerror(f"Validation error: {e}")
|
||||
return [{"data": json_data, "validation_error": str(e)}]
|
||||
|
||||
except (TypeError, ValueError) as e:
|
||||
await logger.aerror(f"Error building structured output: {e}")
|
||||
# Fallback to parsed JSON without validation
|
||||
return json_data
|
||||
|
||||
async def json_response(self) -> Data:
|
||||
"""Convert agent response to structured JSON Data output with schema validation."""
|
||||
# Always use structured chat agent for JSON response mode for better JSON formatting
|
||||
try:
|
||||
system_components = []
|
||||
|
||||
# 1. Agent Instructions (system_prompt)
|
||||
agent_instructions = getattr(self, "system_prompt", "") or ""
|
||||
if agent_instructions:
|
||||
system_components.append(f"{agent_instructions}")
|
||||
|
||||
# 2. Format Instructions
|
||||
format_instructions = getattr(self, "format_instructions", "") or ""
|
||||
if format_instructions:
|
||||
system_components.append(f"Format instructions: {format_instructions}")
|
||||
|
||||
# 3. Schema Information from BaseModel
|
||||
if hasattr(self, "output_schema") and self.output_schema and len(self.output_schema) > 0:
|
||||
try:
|
||||
processed_schema = self._preprocess_schema(self.output_schema)
|
||||
output_model = build_model_from_schema(processed_schema)
|
||||
schema_dict = output_model.model_json_schema()
|
||||
schema_info = (
|
||||
"You are given some text that may include format instructions, "
|
||||
"explanations, or other content alongside a JSON schema.\n\n"
|
||||
"Your task:\n"
|
||||
"- Extract only the JSON schema.\n"
|
||||
"- Return it as valid JSON.\n"
|
||||
"- Do not include format instructions, explanations, or extra text.\n\n"
|
||||
"Input:\n"
|
||||
f"{json.dumps(schema_dict, indent=2)}\n\n"
|
||||
"Output (only JSON schema):"
|
||||
)
|
||||
system_components.append(schema_info)
|
||||
except (ValidationError, ValueError, TypeError, KeyError) as e:
|
||||
await logger.aerror(f"Could not build schema for prompt: {e}", exc_info=True)
|
||||
|
||||
# Combine all components
|
||||
combined_instructions = "\n\n".join(system_components) if system_components else ""
|
||||
llm_model, self.chat_history, self.tools = await self.get_agent_requirements()
|
||||
self.set(
|
||||
llm=llm_model,
|
||||
tools=self.tools or [],
|
||||
chat_history=self.chat_history,
|
||||
input_value=self.input_value,
|
||||
system_prompt=combined_instructions,
|
||||
)
|
||||
|
||||
# Create and run structured chat agent
|
||||
try:
|
||||
structured_agent = self.create_agent_runnable()
|
||||
except (NotImplementedError, ValueError, TypeError) as e:
|
||||
await logger.aerror(f"Error with structured chat agent: {e}")
|
||||
raise
|
||||
try:
|
||||
result = await self.run_agent(structured_agent)
|
||||
except (ExceptionWithMessageError, ValueError, TypeError, RuntimeError) as e:
|
||||
await logger.aerror(f"Error with structured agent result: {e}")
|
||||
raise
|
||||
# Extract content from structured agent result
|
||||
if hasattr(result, "content"):
|
||||
content = result.content
|
||||
elif hasattr(result, "text"):
|
||||
content = result.text
|
||||
else:
|
||||
content = str(result)
|
||||
|
||||
except (ExceptionWithMessageError, ValueError, TypeError, NotImplementedError, AttributeError) as e:
|
||||
await logger.aerror(f"Error with structured chat agent: {e}")
|
||||
# Fallback to regular agent
|
||||
content_str = "No content returned from agent"
|
||||
return Data(data={"content": content_str, "error": str(e)})
|
||||
|
||||
# Process with structured output validation
|
||||
try:
|
||||
structured_output = await self.build_structured_output_base(content)
|
||||
|
||||
# Handle different output formats
|
||||
if isinstance(structured_output, list) and structured_output:
|
||||
if len(structured_output) == 1:
|
||||
return Data(data=structured_output[0])
|
||||
return Data(data={"results": structured_output})
|
||||
if isinstance(structured_output, dict):
|
||||
return Data(data=structured_output)
|
||||
return Data(data={"content": content})
|
||||
|
||||
except (ValueError, TypeError) as e:
|
||||
await logger.aerror(f"Error in structured output processing: {e}")
|
||||
return Data(data={"content": content, "error": str(e)})
|
||||
|
||||
async def get_memory_data(self):
|
||||
# TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.
|
||||
|
|
@ -190,7 +384,7 @@ class AgentComponent(ToolCallingAgentComponent):
|
|||
message for message in messages if getattr(message, "id", None) != getattr(self.input_value, "id", None)
|
||||
]
|
||||
|
||||
def get_llm(self):
|
||||
async def get_llm(self):
|
||||
if not isinstance(self.agent_llm, str):
|
||||
return self.agent_llm, None
|
||||
|
||||
|
|
@ -207,8 +401,8 @@ class AgentComponent(ToolCallingAgentComponent):
|
|||
|
||||
return self._build_llm_model(component_class, inputs, prefix), display_name
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error building {self.agent_llm} language model: {e!s}")
|
||||
except (AttributeError, ValueError, TypeError, RuntimeError) as e:
|
||||
await logger.aerror(f"Error building {self.agent_llm} language model: {e!s}")
|
||||
msg = f"Failed to initialize language model: {e!s}"
|
||||
raise ValueError(msg) from e
|
||||
|
||||
|
|
@ -289,6 +483,7 @@ class AgentComponent(ToolCallingAgentComponent):
|
|||
build_config.update(fields_to_add)
|
||||
# Reset input types for agent_llm
|
||||
build_config["agent_llm"]["input_types"] = []
|
||||
build_config["agent_llm"]["display_name"] = "Model Provider"
|
||||
elif field_value == "Custom":
|
||||
# Delete all provider fields
|
||||
self.delete_fields(build_config, ALL_PROVIDER_FIELDS)
|
||||
|
|
|
|||
|
|
@ -21,11 +21,10 @@ from langflow.io.schema import flatten_schema, schema_to_langflow_inputs
|
|||
from langflow.logging import logger
|
||||
from langflow.schema.dataframe import DataFrame
|
||||
from langflow.schema.message import Message
|
||||
from langflow.services.auth.utils import create_user_longterm_token
|
||||
|
||||
# Import get_server from the backend API
|
||||
from langflow.services.database.models.user.crud import get_user_by_id
|
||||
from langflow.services.deps import get_session, get_settings_service, get_storage_service
|
||||
from langflow.services.deps import get_settings_service, get_storage_service, session_scope
|
||||
|
||||
|
||||
class MCPToolsComponent(ComponentWithCache):
|
||||
|
|
@ -118,12 +117,12 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
schema_inputs = schema_to_langflow_inputs(input_schema)
|
||||
if not schema_inputs:
|
||||
msg = f"No input parameters defined for tool '{tool_obj.name}'"
|
||||
logger.warning(msg)
|
||||
await logger.awarning(msg)
|
||||
return []
|
||||
|
||||
except Exception as e:
|
||||
msg = f"Error validating schema inputs: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise ValueError(msg) from e
|
||||
else:
|
||||
return schema_inputs
|
||||
|
|
@ -154,9 +153,11 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
return self.tools, {"name": server_name, "config": server_config_from_value}
|
||||
|
||||
try:
|
||||
async for db in get_session():
|
||||
user_id, _ = await create_user_longterm_token(db)
|
||||
current_user = await get_user_by_id(db, user_id)
|
||||
async with session_scope() as db:
|
||||
if not self.user_id:
|
||||
msg = "User ID is required for fetching MCP tools."
|
||||
raise ValueError(msg)
|
||||
current_user = await get_user_by_id(db, self.user_id)
|
||||
|
||||
# Try to get server config from DB/API
|
||||
server_config = await get_server(
|
||||
|
|
@ -167,47 +168,48 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
settings_service=get_settings_service(),
|
||||
)
|
||||
|
||||
# If get_server returns empty but we have a config, use it
|
||||
if not server_config and server_config_from_value:
|
||||
server_config = server_config_from_value
|
||||
# If get_server returns empty but we have a config, use it
|
||||
if not server_config and server_config_from_value:
|
||||
server_config = server_config_from_value
|
||||
|
||||
if not server_config:
|
||||
self.tools = []
|
||||
return [], {"name": server_name, "config": server_config}
|
||||
if not server_config:
|
||||
self.tools = []
|
||||
return [], {"name": server_name, "config": server_config}
|
||||
|
||||
_, tool_list, tool_cache = await update_tools(
|
||||
server_name=server_name,
|
||||
server_config=server_config,
|
||||
mcp_stdio_client=self.stdio_client,
|
||||
mcp_sse_client=self.sse_client,
|
||||
)
|
||||
_, tool_list, tool_cache = await update_tools(
|
||||
server_name=server_name,
|
||||
server_config=server_config,
|
||||
mcp_stdio_client=self.stdio_client,
|
||||
mcp_sse_client=self.sse_client,
|
||||
)
|
||||
|
||||
self.tool_names = [tool.name for tool in tool_list if hasattr(tool, "name")]
|
||||
self._tool_cache = tool_cache
|
||||
self.tools = tool_list
|
||||
# Cache the result using shared cache
|
||||
cache_data = {
|
||||
"tools": tool_list,
|
||||
"tool_names": self.tool_names,
|
||||
"tool_cache": tool_cache,
|
||||
"config": server_config,
|
||||
}
|
||||
self.tool_names = [tool.name for tool in tool_list if hasattr(tool, "name")]
|
||||
self._tool_cache = tool_cache
|
||||
self.tools = tool_list
|
||||
# Cache the result using shared cache
|
||||
cache_data = {
|
||||
"tools": tool_list,
|
||||
"tool_names": self.tool_names,
|
||||
"tool_cache": tool_cache,
|
||||
"config": server_config,
|
||||
}
|
||||
|
||||
# Safely update the servers cache
|
||||
current_servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
|
||||
if isinstance(current_servers_cache, dict):
|
||||
current_servers_cache[server_name] = cache_data
|
||||
safe_cache_set(self._shared_component_cache, "servers", current_servers_cache)
|
||||
# Safely update the servers cache
|
||||
current_servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
|
||||
if isinstance(current_servers_cache, dict):
|
||||
current_servers_cache[server_name] = cache_data
|
||||
safe_cache_set(self._shared_component_cache, "servers", current_servers_cache)
|
||||
|
||||
return tool_list, {"name": server_name, "config": server_config}
|
||||
except (TimeoutError, asyncio.TimeoutError) as e:
|
||||
msg = f"Timeout updating tool list: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise TimeoutError(msg) from e
|
||||
except Exception as e:
|
||||
msg = f"Error updating tool list: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise ValueError(msg) from e
|
||||
else:
|
||||
return tool_list, {"name": server_name, "config": server_config}
|
||||
|
||||
async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:
|
||||
"""Toggle the visibility of connection-specific fields based on the selected mode."""
|
||||
|
|
@ -221,7 +223,7 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
build_config["tool"]["placeholder"] = "Select a tool"
|
||||
except (TimeoutError, asyncio.TimeoutError) as e:
|
||||
msg = f"Timeout updating tool list: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
if not build_config["tools_metadata"]["show"]:
|
||||
build_config["tool"]["show"] = True
|
||||
build_config["tool"]["options"] = []
|
||||
|
|
@ -247,7 +249,7 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
break
|
||||
if tool_obj is None:
|
||||
msg = f"Tool {field_value} not found in available tools: {self.tools}"
|
||||
logger.warning(msg)
|
||||
await logger.awarning(msg)
|
||||
return build_config
|
||||
await self._update_tool_config(build_config, field_value)
|
||||
except Exception as e:
|
||||
|
|
@ -331,7 +333,7 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
|
||||
except Exception as e:
|
||||
msg = f"Error in update_build_config: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise ValueError(msg) from e
|
||||
else:
|
||||
return build_config
|
||||
|
|
@ -384,7 +386,7 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
msg = f"Tool {tool_name} not found in available tools: {self.tools}"
|
||||
self.remove_non_default_keys(build_config)
|
||||
build_config["tool"]["value"] = ""
|
||||
logger.warning(msg)
|
||||
await logger.awarning(msg)
|
||||
return
|
||||
|
||||
try:
|
||||
|
|
@ -402,14 +404,14 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
self.schema_inputs = await self._validate_schema_inputs(tool_obj)
|
||||
if not self.schema_inputs:
|
||||
msg = f"No input parameters to configure for tool '{tool_name}'"
|
||||
logger.info(msg)
|
||||
await logger.ainfo(msg)
|
||||
return
|
||||
|
||||
# Add new inputs to build config
|
||||
for schema_input in self.schema_inputs:
|
||||
if not schema_input or not hasattr(schema_input, "name"):
|
||||
msg = "Invalid schema input detected, skipping"
|
||||
logger.warning(msg)
|
||||
await logger.awarning(msg)
|
||||
continue
|
||||
|
||||
try:
|
||||
|
|
@ -426,16 +428,16 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
|
||||
except (AttributeError, KeyError, TypeError) as e:
|
||||
msg = f"Error processing schema input {schema_input}: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
continue
|
||||
except ValueError as e:
|
||||
msg = f"Schema validation error for tool {tool_name}: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
self.schema_inputs = []
|
||||
return
|
||||
except (AttributeError, KeyError, TypeError) as e:
|
||||
msg = f"Error updating tool config: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise ValueError(msg) from e
|
||||
|
||||
async def build_output(self) -> DataFrame:
|
||||
|
|
@ -472,7 +474,7 @@ class MCPToolsComponent(ComponentWithCache):
|
|||
return DataFrame(data=[{"error": "You must select a tool"}])
|
||||
except Exception as e:
|
||||
msg = f"Error in build_output: {e!s}"
|
||||
logger.exception(msg)
|
||||
await logger.aexception(msg)
|
||||
raise ValueError(msg) from e
|
||||
|
||||
def _get_session_context(self) -> str | None:
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
from typing import Any, cast
|
||||
|
||||
import requests
|
||||
from loguru import logger
|
||||
from pydantic import ValidationError
|
||||
|
||||
from langflow.base.models.anthropic_constants import (
|
||||
|
|
@ -14,6 +13,7 @@ from langflow.base.models.model import LCModelComponent
|
|||
from langflow.field_typing import LanguageModel
|
||||
from langflow.field_typing.range_spec import RangeSpec
|
||||
from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.dotdict import dotdict
|
||||
|
||||
|
||||
|
|
@ -101,7 +101,7 @@ class AnthropicModelComponent(LCModelComponent):
|
|||
|
||||
return output
|
||||
|
||||
def get_models(self, tool_model_enabled: bool | None = None) -> list[str]:
|
||||
def get_models(self, *, tool_model_enabled: bool | None = None) -> list[str]:
|
||||
try:
|
||||
import anthropic
|
||||
|
||||
|
|
@ -129,7 +129,7 @@ class AnthropicModelComponent(LCModelComponent):
|
|||
model_with_tool = ChatAnthropic(
|
||||
model=model, # Use the current model being checked
|
||||
anthropic_api_key=self.api_key,
|
||||
anthropic_api_url=cast(str, self.base_url) or DEFAULT_ANTHROPIC_API_URL,
|
||||
anthropic_api_url=cast("str", self.base_url) or DEFAULT_ANTHROPIC_API_URL,
|
||||
)
|
||||
|
||||
if (
|
||||
|
|
@ -177,8 +177,9 @@ class AnthropicModelComponent(LCModelComponent):
|
|||
except (ImportError, ValueError, requests.exceptions.RequestException) as e:
|
||||
logger.exception(f"Error getting model names: {e}")
|
||||
ids = ANTHROPIC_MODELS
|
||||
build_config.setdefault("model_name", {})
|
||||
build_config["model_name"]["options"] = ids
|
||||
build_config["model_name"]["value"] = ids[0]
|
||||
build_config["model_name"].setdefault("value", ids[0])
|
||||
build_config["model_name"]["combobox"] = True
|
||||
except Exception as e:
|
||||
msg = f"Error getting model names: {e}"
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import assemblyai as aai
|
||||
from loguru import logger
|
||||
|
||||
from langflow.custom.custom_component.component import Component
|
||||
from langflow.io import DataInput, DropdownInput, IntInput, Output, SecretStrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
@ -58,7 +58,7 @@ class AssemblyAIGetSubtitles(Component):
|
|||
transcript = aai.Transcript.get_by_id(transcript_id)
|
||||
except Exception as e: # noqa: BLE001
|
||||
error = f"Getting transcription failed: {e}"
|
||||
logger.opt(exception=True).debug(error)
|
||||
logger.debug(error, exc_info=True)
|
||||
self.status = error
|
||||
return Data(data={"error": error})
|
||||
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import assemblyai as aai
|
||||
from loguru import logger
|
||||
|
||||
from langflow.custom.custom_component.component import Component
|
||||
from langflow.io import DataInput, DropdownInput, FloatInput, IntInput, MultilineInput, Output, SecretStrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
@ -131,7 +131,7 @@ class AssemblyAILeMUR(Component):
|
|||
try:
|
||||
response = self.perform_lemur_action(transcript_group, self.endpoint)
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.opt(exception=True).debug("Error running LeMUR")
|
||||
logger.debug("Error running LeMUR", exc_info=True)
|
||||
error = f"An Error happened: {e}"
|
||||
self.status = error
|
||||
return Data(data={"error": error})
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import assemblyai as aai
|
||||
from loguru import logger
|
||||
|
||||
from langflow.custom.custom_component.component import Component
|
||||
from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
@ -86,7 +86,7 @@ class AssemblyAIListTranscripts(Component):
|
|||
transcripts = convert_page_to_data_list(page)
|
||||
|
||||
except Exception as e: # noqa: BLE001
|
||||
logger.opt(exception=True).debug("Error listing transcripts")
|
||||
logger.debug("Error listing transcripts", exc_info=True)
|
||||
error_data = Data(data={"error": f"An error occurred: {e}"})
|
||||
self.status = [error_data]
|
||||
return [error_data]
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
import assemblyai as aai
|
||||
from loguru import logger
|
||||
|
||||
from langflow.custom.custom_component.component import Component
|
||||
from langflow.field_typing.range_spec import RangeSpec
|
||||
from langflow.io import DataInput, FloatInput, Output, SecretStrInput
|
||||
from langflow.logging.logger import logger
|
||||
from langflow.schema.data import Data
|
||||
|
||||
|
||||
|
|
@ -54,7 +54,7 @@ class AssemblyAITranscriptionJobPoller(Component):
|
|||
transcript = aai.Transcript.get_by_id(self.transcript_id.data["transcript_id"])
|
||||
except Exception as e: # noqa: BLE001
|
||||
error = f"Getting transcription failed: {e}"
|
||||
logger.opt(exception=True).debug(error)
|
||||
logger.debug(error, exc_info=True)
|
||||
self.status = error
|
||||
return Data(data={"error": error})
|
||||
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue