diff --git a/Makefile b/Makefile index 15337f65b..baf4220ff 100644 --- a/Makefile +++ b/Makefile @@ -43,6 +43,7 @@ install_backend: poetry install backend: + make install_backend poetry run uvicorn langflow.main:app --port 7860 --reload --log-level debug build_frontend: diff --git a/poetry.lock b/poetry.lock index 5e53d5282..58498c4d1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. [[package]] name = "aiofiles" @@ -148,6 +148,27 @@ files = [ {file = "aiostream-0.4.5.tar.gz", hash = "sha256:3ecbf87085230fbcd9605c32ca20c4fb41af02c71d076eab246ea22e35947d88"}, ] +[[package]] +name = "anthropic" +version = "0.2.10" +description = "Library for accessing the anthropic API" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anthropic-0.2.10-py3-none-any.whl", hash = "sha256:a007496207fd186b0bcb9592b00ca130069d2a427f3d6f602a61dbbd1ac6316e"}, + {file = "anthropic-0.2.10.tar.gz", hash = "sha256:e4da061a86d8ffb86072c0b0feaf219a3a4f7dfddd4224df9ba769e469498c19"}, +] + +[package.dependencies] +aiohttp = "*" +httpx = "*" +requests = "*" +tokenizers = "*" + +[package.extras] +dev = ["black (>=22.3.0)", "pytest"] + [[package]] name = "anyio" version = "3.7.0" @@ -719,14 +740,14 @@ superset = ["apache-superset (>=1.4.1)"] [[package]] name = "cohere" -version = "4.6.0" +version = "4.7.0" description = "" category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ - {file = "cohere-4.6.0-py3-none-any.whl", hash = "sha256:fc60fa73a2d96bdb9f70da4a290d3ede320b74ac01a24c229011049d7cb3511f"}, - {file = "cohere-4.6.0.tar.gz", hash = "sha256:43218a0a40f6fc023e068732994fb631ce5d160a0bc9f9a3a22524b5932f34ea"}, + {file = "cohere-4.7.0-py3-none-any.whl", hash = "sha256:ed15621bd271b941110a572cbf6187a4db78cc8d7d9d35a881bffcaeeea21d7c"}, + {file = 
"cohere-4.7.0.tar.gz", hash = "sha256:46c674545ad36133a555c6db7f25bdc299e05ec4f77dee707b661a57eb06aea6"}, ] [package.dependencies] @@ -862,31 +883,31 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "40.0.2" +version = "41.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b"}, - {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b"}, - {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"}, - {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c"}, - {file = "cryptography-40.0.2-cp36-abi3-win32.whl", hash = "sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9"}, - {file = "cryptography-40.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b"}, - 
{file = "cryptography-40.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b"}, - {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e"}, - {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a"}, - {file = "cryptography-40.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404"}, - {file = "cryptography-40.0.2.tar.gz", hash = "sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99"}, + {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:f73bff05db2a3e5974a6fd248af2566134d8981fd7ab012e5dd4ddb1d9a70699"}, + {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1a5472d40c8f8e91ff7a3d8ac6dfa363d8e3138b961529c996f3e2df0c7a411a"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fa01527046ca5facdf973eef2535a27fec4cb651e4daec4d043ef63f6ecd4ca"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b46e37db3cc267b4dea1f56da7346c9727e1209aa98487179ee8ebed09d21e43"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d198820aba55660b4d74f7b5fd1f17db3aa5eb3e6893b0a41b75e84e4f9e0e4b"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:948224d76c4b6457349d47c0c98657557f429b4e93057cf5a2f71d603e2fc3a3"}, + {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:059e348f9a3c1950937e1b5d7ba1f8e968508ab181e75fc32b879452f08356db"}, + {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b4ceb5324b998ce2003bc17d519080b4ec8d5b7b70794cbd2836101406a9be31"}, + {file = "cryptography-41.0.1-cp37-abi3-win32.whl", hash = "sha256:8f4ab7021127a9b4323537300a2acfb450124b2def3756f64dc3a3d2160ee4b5"}, + {file = "cryptography-41.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:1fee5aacc7367487b4e22484d3c7e547992ed726d14864ee33c0176ae43b0d7c"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9a6c7a3c87d595608a39980ebaa04d5a37f94024c9f24eb7d10262b92f739ddb"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5d092fdfedaec4cbbffbf98cddc915ba145313a6fdaab83c6e67f4e6c218e6f3"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a8e6c2de6fbbcc5e14fd27fb24414507cb3333198ea9ab1258d916f00bc3039"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb33ccf15e89f7ed89b235cff9d49e2e62c6c981a6061c9c8bb47ed7951190bc"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f0ff6e18d13a3de56f609dd1fd11470918f770c6bd5d00d632076c727d35485"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7bfc55a5eae8b86a287747053140ba221afc65eb06207bedf6e019b8934b477c"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:eb8163f5e549a22888c18b0d53d6bb62a20510060a22fd5a995ec8a05268df8a"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8dde71c4169ec5ccc1087bb7521d54251c016f126f922ab2dfe6649170a3b8c5"}, + {file = "cryptography-41.0.1.tar.gz", hash = "sha256:d34579085401d3f49762d2f7d6634d6b6c2ae1242202e860f4d26b046e3a1006"}, ] [package.dependencies] @@ -895,23 +916,23 @@ cffi = ">=1.12" [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "check-manifest", "mypy", "ruff"] -sdist = ["setuptools-rust (>=0.11.4)"] +nox = ["nox"] +pep8test = ["black", "check-sdist", "mypy", "ruff"] +sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist"] +test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -tox = ["tox"] [[package]] name = "ctransformers" -version = "0.2.2" +version = "0.2.5" description = "Python bindings for the Transformer models implemented in C/C++ using GGML library." 
category = "main" optional = false python-versions = "*" files = [ - {file = "ctransformers-0.2.2-py3-none-any.whl", hash = "sha256:bf682dd0293dd87911c9a9a1169a4873ff55baebc16d465c6029c77f11b18cf6"}, - {file = "ctransformers-0.2.2.tar.gz", hash = "sha256:1fc36b3fde36d9fd3cb69e48993315bb1f5f54ae552720eb909dc4b3a131c743"}, + {file = "ctransformers-0.2.5-py3-none-any.whl", hash = "sha256:5e0ee7d2be2cb1d627a702acdbf1f4f3c9c97d706e9d7f59a13079c1836a1432"}, + {file = "ctransformers-0.2.5.tar.gz", hash = "sha256:b813f19d5c2249b75422ae3188b1c834aeb8a095800df32328559a740acdb404"}, ] [package.dependencies] @@ -1044,14 +1065,14 @@ weaviate = ["weaviate-client (>=3.9.0,<3.10.0)"] [[package]] name = "docker" -version = "6.1.2" +version = "6.1.3" description = "A Python library for the Docker Engine API." category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "docker-6.1.2-py3-none-any.whl", hash = "sha256:134cd828f84543cbf8e594ff81ca90c38288df3c0a559794c12f2e4b634ea19e"}, - {file = "docker-6.1.2.tar.gz", hash = "sha256:dcc088adc2ec4e7cfc594e275d8bd2c9738c56c808de97476939ef67db5af8c2"}, + {file = "docker-6.1.3-py3-none-any.whl", hash = "sha256:aecd2277b8bf8e506e484f6ab7aec39abe0038e29fa4a6d3ba86c3fe01844ed9"}, + {file = "docker-6.1.3.tar.gz", hash = "sha256:aa6d17830045ba5ef0168d5eaa34d37beeb113948c413affe1d5991fc11f9a20"}, ] [package.dependencies] @@ -1096,9 +1117,7 @@ files = [ {file = "duckdb-0.8.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b2707096d6df4321044fcde2c9f04da632d11a8be60957fd09d49a42fae71a29"}, {file = "duckdb-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b27df1b70ae74d2c88efb5ffca8490954fdc678099509a9c4404ca30acc53426"}, {file = "duckdb-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75a97c800271b52dd0f37696d074c50576dcb4b2750b6115932a98696a268070"}, - {file = "duckdb-0.8.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:804cac261a5e016506a6d67838a65d19b06a237f7949f1704f0e800eb708286a"}, {file = "duckdb-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6b9abca7fa6713e1d031c18485343b4de99742c7e1b85c10718aa2f31a4e2c6"}, - {file = "duckdb-0.8.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:51aa6d606d49072abcfeb3be209eb559ac94c1b5e70f58ac3adbb94aca9cd69f"}, {file = "duckdb-0.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7c8dc769aaf2be0a1c57995ca657e5b92c1c56fc8437edb720ca6cab571adf14"}, {file = "duckdb-0.8.0-cp311-cp311-win32.whl", hash = "sha256:c4207d18b42387c4a035846d8878eb967070198be8ac26fd77797ce320d1a400"}, {file = "duckdb-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:0c392257547c20794c3072fcbca99a49ef0a49974005d755e93893e2b4875267"}, @@ -1194,6 +1213,41 @@ files = [ [package.extras] tests = ["asttokens", "littleutils", "pytest", "rich"] +[[package]] +name = "faiss-cpu" +version = "1.7.4" +description = "A library for efficient similarity search and clustering of dense vectors." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "faiss-cpu-1.7.4.tar.gz", hash = "sha256:265dc31b0c079bf4433303bf6010f73922490adff9188b915e2d3f5e9c82dd0a"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50d4ebe7f1869483751c558558504f818980292a9b55be36f9a1ee1009d9a686"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b1db7fae7bd8312aeedd0c41536bcd19a6e297229e1dce526bde3a73ab8c0b5"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17b7fa7194a228a84929d9e6619d0e7dbf00cc0f717e3462253766f5e3d07de8"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dca531952a2e3eac56f479ff22951af4715ee44788a3fe991d208d766d3f95f3"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-win_amd64.whl", hash = "sha256:7173081d605e74766f950f2e3d6568a6f00c53f32fd9318063e96728c6c62821"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0bbd6f55d7940cc0692f79e32a58c66106c3c950cee2341b05722de9da23ea3"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e13c14280376100f143767d0efe47dcb32618f69e62bbd3ea5cd38c2e1755926"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c521cb8462f3b00c0c7dfb11caff492bb67816528b947be28a3b76373952c41d"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afdd9fe1141117fed85961fd36ee627c83fc3b9fd47bafb52d3c849cc2f088b7"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-win_amd64.whl", hash = "sha256:2ff7f57889ea31d945e3b87275be3cad5d55b6261a4e3f51c7aba304d76b81fb"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:eeaf92f27d76249fb53c1adafe617b0f217ab65837acf7b4ec818511caf6e3d8"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:102b1bd763e9b0c281ac312590af3eaf1c8b663ccbc1145821fe6a9f92b8eaaf"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5512da6707c967310c46ff712b00418b7ae28e93cb609726136e826e9f2f14fa"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0c2e5b9d8c28c99f990e87379d5bbcc6c914da91ebb4250166864fd12db5755b"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f67f325393145d360171cd98786fcea6120ce50397319afd3bb78be409fb8a"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6a4e4af194b8fce74c4b770cad67ad1dd1b4673677fc169723e4c50ba5bd97a8"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31bfb7b9cffc36897ae02a983e04c09fe3b8c053110a287134751a115334a1df"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52d7de96abef2340c0d373c1f5cbc78026a3cebb0f8f3a5920920a00210ead1f"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-win_amd64.whl", hash = "sha256:699feef85b23c2c729d794e26ca69bebc0bee920d676028c06fd0e0becc15c7e"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:559a0133f5ed44422acb09ee1ac0acffd90c6666d1bc0d671c18f6e93ad603e2"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1d71539fe3dc0f1bed41ef954ca701678776f231046bf0ca22ccea5cf5bef6"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12d45e0157024eb3249842163162983a1ac8b458f1a8b17bbf86f01be4585a99"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f0eab359e066d32c874f51a7d4bf6440edeec068b7fe47e6d803c73605a8b4c"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-win_amd64.whl", hash = "sha256:98459ceeeb735b9df1a5b94572106ffe0a6ce740eb7e4626715dd218657bb4dc"}, +] + [[package]] name = "fake-useragent" version = "1.1.3" @@ -1211,14 +1265,14 @@ 
importlib-resources = {version = ">=5.0", markers = "python_version < \"3.10\""} [[package]] name = "fastapi" -version = "0.95.2" +version = "0.96.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "fastapi-0.95.2-py3-none-any.whl", hash = "sha256:d374dbc4ef2ad9b803899bd3360d34c534adc574546e25314ab72c0c4411749f"}, - {file = "fastapi-0.95.2.tar.gz", hash = "sha256:4d9d3e8c71c73f11874bcf5e33626258d143252e329a01002f767306c64fb982"}, + {file = "fastapi-0.96.0-py3-none-any.whl", hash = "sha256:b8e11fe81e81eab4e1504209917338e0b80f783878a42c2b99467e5e1019a1e9"}, + {file = "fastapi-0.96.0.tar.gz", hash = "sha256:71232d47c2787446991c81c41c249f8a16238d52d779c0e6b43927d3773dbe3c"}, ] [package.dependencies] @@ -1387,14 +1441,14 @@ uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" -version = "2.19.0" +version = "2.19.1" description = "Google Authentication Library" category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "google-auth-2.19.0.tar.gz", hash = "sha256:f39d528077ac540793dd3c22a8706178f157642a67d874db25c640b7fead277e"}, - {file = "google_auth-2.19.0-py2.py3-none-any.whl", hash = "sha256:be617bfaf77774008e9d177573f782e109188c8a64ae6e744285df5cea3e7df6"}, + {file = "google-auth-2.19.1.tar.gz", hash = "sha256:a9cfa88b3e16196845e64a3658eb953992129d13ac7337b064c6546f77c17183"}, + {file = "google_auth-2.19.1-py2.py3-none-any.whl", hash = "sha256:ea165e014c7cbd496558796b627c271aa8c18b4cba79dc1cc962b24c5efdfb85"}, ] [package.dependencies] @@ -2037,14 +2091,14 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio" [[package]] name = "ipython" -version = "8.13.2" +version = "8.14.0" description = "IPython: Productive Interactive Computing" category = "dev" optional = false python-versions = ">=3.9" files = [ - {file = "ipython-8.13.2-py3-none-any.whl", hash = 
"sha256:ffca270240fbd21b06b2974e14a86494d6d29290184e788275f55e0b55914926"}, - {file = "ipython-8.13.2.tar.gz", hash = "sha256:7dff3fad32b97f6488e02f87b970f309d082f758d7b7fc252e3b19ee0e432dbb"}, + {file = "ipython-8.14.0-py3-none-any.whl", hash = "sha256:248aca623f5c99a6635bc3857677b7320b9b8039f99f070ee0d20a5ca5a8e6bf"}, + {file = "ipython-8.14.0.tar.gz", hash = "sha256:1d197b907b6ba441b692c48cf2a3a2de280dc0ac91a3405b39349a50272ca0a1"}, ] [package.dependencies] @@ -2376,13 +2430,13 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"] [[package]] name = "langchain-serve" -version = "0.0.40" +version = "0.0.41" description = "Langchain Serve - serve your langchain apps on Jina AI Cloud." category = "main" optional = true python-versions = "*" files = [ - {file = "langchain-serve-0.0.40.tar.gz", hash = "sha256:c60b173fcf0b682fbb70d34e8f485ce168e2229f55cb5c4ffbc26a5206af1c06"}, + {file = "langchain-serve-0.0.41.tar.gz", hash = "sha256:fcf0d3ac9e48b5b24825ae2e1d8a383795c5744b18ef61cb74a99020a9e5d46a"}, ] [package.dependencies] @@ -4679,14 +4733,14 @@ idna2008 = ["idna"] [[package]] name = "rich" -version = "13.3.5" +version = "13.4.1" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" category = "main" optional = false python-versions = ">=3.7.0" files = [ - {file = "rich-13.3.5-py3-none-any.whl", hash = "sha256:69cdf53799e63f38b95b9bf9c875f8c90e78dd62b2f00c13a911c7a3b9fa4704"}, - {file = "rich-13.3.5.tar.gz", hash = "sha256:2d11b9b8dd03868f09b4fffadc84a6a8cda574e40dc90821bd845720ebb8e89c"}, + {file = "rich-13.4.1-py3-none-any.whl", hash = "sha256:d204aadb50b936bf6b1a695385429d192bc1fdaf3e8b907e8e26f4c4e4b5bf75"}, + {file = "rich-13.4.1.tar.gz", hash = "sha256:76f6b65ea7e5c5d924ba80e322231d7cb5b5981aa60bfc1e694f1bc097fe6fe1"}, ] [package.dependencies] @@ -5100,14 +5154,14 @@ doc = ["reno", "sphinx", "tornado (>=4.5)"] [[package]] name = "textual" -version = "0.26.0" +version = "0.27.0" description = "Modern Text User 
Interface framework" category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ - {file = "textual-0.26.0-py3-none-any.whl", hash = "sha256:1efd04e9f61b3e95fd1c65436d3262f99e3f86cdeb524d13045bb551eb615c02"}, - {file = "textual-0.26.0.tar.gz", hash = "sha256:78094c83017d2836b726513abdf434cc034a0e68cc45e63b3b056c9b8b7fa673"}, + {file = "textual-0.27.0-py3-none-any.whl", hash = "sha256:dc45eaf7da330686c56d6f76f59d05fd216ce6aad90fa44ee269881efc622151"}, + {file = "textual-0.27.0.tar.gz", hash = "sha256:8bdcb09dc35a706ef939b1276ccfdec10eaaee6147b41cb7587cf33298a8dd33"}, ] [package.dependencies] @@ -6180,4 +6234,4 @@ deploy = ["langchain-serve"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "2bf357ad30f79c68751b34c991b4a73767ceb628657f4133228d4eb487d8a6fb" +content-hash = "4cac7dea0c1222711ba7eed82d5716d5e361d454edb6e0299b387fbb115d2c3d" diff --git a/pyproject.toml b/pyproject.toml index 78104f327..87843198b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langflow" -version = "0.0.79" +version = "0.0.80" description = "A Python package with a built-in web application" authors = ["Logspace "] maintainers = [ @@ -22,7 +22,7 @@ langflow = "langflow.__main__:main" [tool.poetry.dependencies] python = ">=3.9,<3.12" -fastapi = "^0.95.0" +fastapi = "^0.96.0" uvicorn = "^0.20.0" beautifulsoup4 = "^4.11.2" google-search-results = "^2.4.1" @@ -57,6 +57,8 @@ jina = "3.15.2" sentence-transformers = "^2.2.2" ctransformers = "^0.2.2" cohere = "^4.6.0" +faiss-cpu = "^1.7.4" +anthropic = "^0.2.9" [tool.poetry.group.dev.dependencies] @@ -76,6 +78,15 @@ types-pillow = "^9.5.0.2" [tool.poetry.extras] deploy = ["langchain-serve"] +[tool.pytest.ini_options] +minversion = "6.0" +addopts = "-ra" +testpaths = ["tests", "integration"] +console_output_style = "progress" +filterwarnings = ["ignore::DeprecationWarning"] +log_cli = true + + [tool.ruff] line-length = 120 diff --git 
a/src/backend/langflow/__init__.py b/src/backend/langflow/__init__.py index 35fe814d2..17b1d940c 100644 --- a/src/backend/langflow/__init__.py +++ b/src/backend/langflow/__init__.py @@ -1,4 +1,4 @@ from langflow.cache import cache_manager -from langflow.interface.loading import load_flow_from_json +from langflow.processing.process import load_flow_from_json __all__ = ["load_flow_from_json", "cache_manager"] diff --git a/src/backend/langflow/api/__init__.py b/src/backend/langflow/api/__init__.py index e69de29bb..f887c47e1 100644 --- a/src/backend/langflow/api/__init__.py +++ b/src/backend/langflow/api/__init__.py @@ -0,0 +1,3 @@ +from langflow.api.router import router + +__all__ = ["router"] diff --git a/src/backend/langflow/api/router.py b/src/backend/langflow/api/router.py new file mode 100644 index 000000000..23b5aa1c5 --- /dev/null +++ b/src/backend/langflow/api/router.py @@ -0,0 +1,8 @@ +# Router for base api +from fastapi import APIRouter +from langflow.api.v1 import chat_router, endpoints_router, validate_router + +router = APIRouter(prefix="/api/v1", tags=["api"]) +router.include_router(chat_router) +router.include_router(endpoints_router) +router.include_router(validate_router) diff --git a/src/backend/langflow/api/v1/__init__.py b/src/backend/langflow/api/v1/__init__.py new file mode 100644 index 000000000..d835b4535 --- /dev/null +++ b/src/backend/langflow/api/v1/__init__.py @@ -0,0 +1,5 @@ +from langflow.api.v1.endpoints import router as endpoints_router +from langflow.api.v1.validate import router as validate_router +from langflow.api.v1.chat import router as chat_router + +__all__ = ["chat_router", "endpoints_router", "validate_router"] diff --git a/src/backend/langflow/api/base.py b/src/backend/langflow/api/v1/base.py similarity index 96% rename from src/backend/langflow/api/base.py rename to src/backend/langflow/api/v1/base.py index 8cddc52e4..6941bedf3 100644 --- a/src/backend/langflow/api/base.py +++ b/src/backend/langflow/api/v1/base.py @@ -1,6 
+1,6 @@ from pydantic import BaseModel, validator -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.interface.utils import extract_input_variables_from_prompt class CacheResponse(BaseModel): diff --git a/src/backend/langflow/api/callback.py b/src/backend/langflow/api/v1/callback.py similarity index 95% rename from src/backend/langflow/api/callback.py rename to src/backend/langflow/api/v1/callback.py index d63e107c4..b58393d7b 100644 --- a/src/backend/langflow/api/callback.py +++ b/src/backend/langflow/api/v1/callback.py @@ -3,7 +3,7 @@ from typing import Any from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler -from langflow.api.schemas import ChatResponse +from langflow.api.v1.schemas import ChatResponse # https://github.com/hwchase17/chat-langchain/blob/master/callback.py diff --git a/src/backend/langflow/api/chat.py b/src/backend/langflow/api/v1/chat.py similarity index 93% rename from src/backend/langflow/api/chat.py rename to src/backend/langflow/api/v1/chat.py index 4afa6c22f..7df4c65ed 100644 --- a/src/backend/langflow/api/chat.py +++ b/src/backend/langflow/api/v1/chat.py @@ -6,7 +6,7 @@ from fastapi import ( status, ) -from langflow.api.chat_manager import ChatManager +from langflow.chat.manager import ChatManager from langflow.utils.logger import logger router = APIRouter() diff --git a/src/backend/langflow/api/endpoints.py b/src/backend/langflow/api/v1/endpoints.py similarity index 87% rename from src/backend/langflow/api/endpoints.py rename to src/backend/langflow/api/v1/endpoints.py index 021a81ca8..1e9b0deb1 100644 --- a/src/backend/langflow/api/endpoints.py +++ b/src/backend/langflow/api/v1/endpoints.py @@ -3,13 +3,13 @@ from importlib.metadata import version from fastapi import APIRouter, HTTPException -from langflow.api.schemas import ( +from langflow.api.v1.schemas import ( ExportedFlow, GraphData, PredictRequest, PredictResponse, ) -from langflow.interface.run import process_graph_cached 
+ from langflow.interface.types import build_langchain_types_dict # build router @@ -25,6 +25,8 @@ def get_all(): @router.post("/predict", response_model=PredictResponse) async def get_load(predict_request: PredictRequest): try: + from langflow.processing.process import process_graph_cached + exported_flow: ExportedFlow = predict_request.exported_flow graph_data: GraphData = exported_flow.data data = graph_data.dict() @@ -40,8 +42,3 @@ async def get_load(predict_request: PredictRequest): @router.get("/version") def get_version(): return {"version": version("langflow")} - - -@router.get("/health") -def get_health(): - return {"status": "OK"} diff --git a/src/backend/langflow/api/schemas.py b/src/backend/langflow/api/v1/schemas.py similarity index 100% rename from src/backend/langflow/api/schemas.py rename to src/backend/langflow/api/v1/schemas.py diff --git a/src/backend/langflow/api/validate.py b/src/backend/langflow/api/v1/validate.py similarity index 87% rename from src/backend/langflow/api/validate.py rename to src/backend/langflow/api/v1/validate.py index 53a7ee350..009cb9a30 100644 --- a/src/backend/langflow/api/validate.py +++ b/src/backend/langflow/api/v1/validate.py @@ -2,15 +2,15 @@ import json from fastapi import APIRouter, HTTPException -from langflow.api.base import ( +from langflow.api.v1.base import ( Code, CodeValidationResponse, Prompt, PromptValidationResponse, validate_prompt, ) -from langflow.graph.node.types import VectorStoreNode -from langflow.interface.run import build_graph +from langflow.graph.vertex.types import VectorStoreVertex +from langflow.graph import Graph from langflow.utils.logger import logger from langflow.utils.validate import validate_code @@ -44,12 +44,12 @@ def post_validate_prompt(prompt: Prompt): def post_validate_node(node_id: str, data: dict): try: # build graph - graph = build_graph(data) + graph = Graph.from_payload(data) # validate node node = graph.get_node(node_id) if node is None: raise ValueError(f"Node {node_id} 
not found") - if not isinstance(node, VectorStoreNode): + if not isinstance(node, VectorStoreVertex): node.build() return json.dumps({"valid": True, "params": str(node._built_object_repr())}) except Exception as e: diff --git a/src/backend/langflow/graph/node/__init__.py b/src/backend/langflow/chat/__init__.py similarity index 100% rename from src/backend/langflow/graph/node/__init__.py rename to src/backend/langflow/chat/__init__.py diff --git a/src/backend/langflow/api/chat_manager.py b/src/backend/langflow/chat/manager.py similarity index 85% rename from src/backend/langflow/api/chat_manager.py rename to src/backend/langflow/chat/manager.py index 8b1c7a621..d24057b68 100644 --- a/src/backend/langflow/api/chat_manager.py +++ b/src/backend/langflow/chat/manager.py @@ -1,21 +1,18 @@ -import asyncio -import json from collections import defaultdict -from typing import Dict, List - from fastapi import WebSocket, status - -from langflow.api.schemas import ChatMessage, ChatResponse, FileResponse +from langflow.api.v1.schemas import ChatMessage, ChatResponse, FileResponse from langflow.cache import cache_manager from langflow.cache.manager import Subject -from langflow.interface.run import ( - get_result_and_steps, - load_or_build_langchain_object, -) -from langflow.interface.utils import pil_to_base64, try_setting_streaming_options +from langflow.chat.utils import process_graph +from langflow.interface.utils import pil_to_base64 from langflow.utils.logger import logger +import asyncio +import json +from typing import Dict, List + + class ChatHistory(Subject): def __init__(self): super().__init__() @@ -191,33 +188,3 @@ class ChatManager: except Exception as e: logger.exception(e) self.disconnect(client_id) - - -async def process_graph( - graph_data: Dict, - is_first_message: bool, - chat_message: ChatMessage, - websocket: WebSocket, -): - langchain_object = load_or_build_langchain_object(graph_data, is_first_message) - langchain_object = 
try_setting_streaming_options(langchain_object, websocket) - logger.debug("Loaded langchain object") - - if langchain_object is None: - # Raise user facing error - raise ValueError( - "There was an error loading the langchain_object. Please, check all the nodes and try again." - ) - - # Generate result and thought - try: - logger.debug("Generating result and thought") - result, intermediate_steps = await get_result_and_steps( - langchain_object, chat_message.message or "", websocket=websocket - ) - logger.debug("Generated result and intermediate_steps") - return result, intermediate_steps - except Exception as e: - # Log stack trace - logger.exception(e) - raise e diff --git a/src/backend/langflow/chat/utils.py b/src/backend/langflow/chat/utils.py new file mode 100644 index 000000000..410a442be --- /dev/null +++ b/src/backend/langflow/chat/utils.py @@ -0,0 +1,41 @@ +from fastapi import WebSocket +from langflow.api.v1.schemas import ChatMessage +from langflow.processing.process import ( + load_or_build_langchain_object, +) +from langflow.processing.base import get_result_and_steps +from langflow.interface.utils import try_setting_streaming_options +from langflow.utils.logger import logger + + +from typing import Dict + + +async def process_graph( + graph_data: Dict, + is_first_message: bool, + chat_message: ChatMessage, + websocket: WebSocket, +): + langchain_object = load_or_build_langchain_object(graph_data, is_first_message) + langchain_object = try_setting_streaming_options(langchain_object, websocket) + logger.debug("Loaded langchain object") + + if langchain_object is None: + # Raise user facing error + raise ValueError( + "There was an error loading the langchain_object. Please, check all the nodes and try again." 
+ ) + + # Generate result and thought + try: + logger.debug("Generating result and thought") + result, intermediate_steps = await get_result_and_steps( + langchain_object, chat_message.message or "", websocket=websocket + ) + logger.debug("Generated result and intermediate_steps") + return result, intermediate_steps + except Exception as e: + # Log stack trace + logger.exception(e) + raise e diff --git a/src/backend/langflow/config.yaml b/src/backend/langflow/config.yaml index e03b2b1be..c815f4b62 100644 --- a/src/backend/langflow/config.yaml +++ b/src/backend/langflow/config.yaml @@ -51,10 +51,13 @@ embeddings: llms: - OpenAI # - AzureOpenAI + # - AzureChatOpenAI - ChatOpenAI - LlamaCpp - CTransformers - Cohere + - Anthropic + - ChatAnthropic memories: - ConversationBufferMemory - ConversationSummaryMemory @@ -73,13 +76,14 @@ toolkits: - JsonToolkit - VectorStoreInfo - VectorStoreRouterToolkit + - VectorStoreToolkit tools: - Search - PAL-MATH - Calculator - Serper Search - Tool - - PythonFunction + - PythonFunctionTool - JsonSpec - News API - TMDB API @@ -118,6 +122,7 @@ vectorstores: - Chroma - Qdrant - Weaviate + - FAISS wrappers: - RequestsWrapper # - ChatPromptTemplate diff --git a/src/backend/langflow/custom/customs.py b/src/backend/langflow/custom/customs.py index ee266b0ee..f7a82e4a3 100644 --- a/src/backend/langflow/custom/customs.py +++ b/src/backend/langflow/custom/customs.py @@ -4,7 +4,7 @@ from langflow.template import frontend_node CUSTOM_NODES = { "prompts": {"ZeroShotPrompt": frontend_node.prompts.ZeroShotPromptNode()}, "tools": { - "PythonFunction": frontend_node.tools.PythonFunctionNode(), + "PythonFunctionTool": frontend_node.tools.PythonFunctionToolNode(), "Tool": frontend_node.tools.ToolNode(), }, "agents": { diff --git a/src/backend/langflow/graph/__init__.py b/src/backend/langflow/graph/__init__.py index 44859da02..a68e844ee 100644 --- a/src/backend/langflow/graph/__init__.py +++ b/src/backend/langflow/graph/__init__.py @@ -1,35 +1,35 @@ from 
langflow.graph.edge.base import Edge from langflow.graph.graph.base import Graph -from langflow.graph.node.base import Node -from langflow.graph.node.types import ( - AgentNode, - ChainNode, - DocumentLoaderNode, - EmbeddingNode, - LLMNode, - MemoryNode, - PromptNode, - TextSplitterNode, - ToolNode, - ToolkitNode, - VectorStoreNode, - WrapperNode, +from langflow.graph.vertex.base import Vertex +from langflow.graph.vertex.types import ( + AgentVertex, + ChainVertex, + DocumentLoaderVertex, + EmbeddingVertex, + LLMVertex, + MemoryVertex, + PromptVertex, + TextSplitterVertex, + ToolVertex, + ToolkitVertex, + VectorStoreVertex, + WrapperVertex, ) __all__ = [ "Graph", - "Node", + "Vertex", "Edge", - "AgentNode", - "ChainNode", - "DocumentLoaderNode", - "EmbeddingNode", - "LLMNode", - "MemoryNode", - "PromptNode", - "TextSplitterNode", - "ToolNode", - "ToolkitNode", - "VectorStoreNode", - "WrapperNode", + "AgentVertex", + "ChainVertex", + "DocumentLoaderVertex", + "EmbeddingVertex", + "LLMVertex", + "MemoryVertex", + "PromptVertex", + "TextSplitterVertex", + "ToolVertex", + "ToolkitVertex", + "VectorStoreVertex", + "WrapperVertex", ] diff --git a/src/backend/langflow/graph/edge/base.py b/src/backend/langflow/graph/edge/base.py index 2bf5a1ba4..08f084a5c 100644 --- a/src/backend/langflow/graph/edge/base.py +++ b/src/backend/langflow/graph/edge/base.py @@ -2,13 +2,13 @@ from langflow.utils.logger import logger from typing import TYPE_CHECKING if TYPE_CHECKING: - from langflow.graph.node.base import Node + from langflow.graph.vertex.base import Vertex class Edge: - def __init__(self, source: "Node", target: "Node"): - self.source: "Node" = source - self.target: "Node" = target + def __init__(self, source: "Vertex", target: "Vertex"): + self.source: "Vertex" = source + self.target: "Vertex" = target self.validate_edge() def validate_edge(self) -> None: @@ -41,7 +41,7 @@ class Edge: logger.debug(self.target_reqs) if no_matched_type: raise ValueError( - f"Edge between 
{self.source.node_type} and {self.target.node_type} " + f"Edge between {self.source.vertex_type} and {self.target.vertex_type} " f"has no matched type" ) diff --git a/src/backend/langflow/graph/graph/base.py b/src/backend/langflow/graph/graph/base.py index 3ba67837f..5fd00d09b 100644 --- a/src/backend/langflow/graph/graph/base.py +++ b/src/backend/langflow/graph/graph/base.py @@ -1,12 +1,12 @@ from typing import Dict, List, Type, Union from langflow.graph.edge.base import Edge -from langflow.graph.graph.constants import NODE_TYPE_MAP -from langflow.graph.node.base import Node -from langflow.graph.node.types import ( - FileToolNode, - LLMNode, - ToolkitNode, +from langflow.graph.graph.constants import VERTEX_TYPE_MAP +from langflow.graph.vertex.base import Vertex +from langflow.graph.vertex.types import ( + FileToolVertex, + LLMVertex, + ToolkitVertex, ) from langflow.interface.tools.constants import FILE_TOOLS from langflow.utils import payload @@ -24,9 +24,29 @@ class Graph: self._edges = edges self._build_graph() + @classmethod + def from_payload(cls, payload: Dict) -> "Graph": + """ + Creates a graph from a payload. + + Args: + payload (Dict): The payload to create the graph from. + + Returns: + Graph: The created graph. 
+ """ + if "data" in payload: + payload = payload["data"] + try: + nodes = payload["nodes"] + edges = payload["edges"] + return cls(nodes, edges) + except KeyError as exc: + raise ValueError("Invalid payload") from exc + def _build_graph(self) -> None: """Builds the graph from the nodes and edges.""" - self.nodes = self._build_nodes() + self.nodes = self._build_vertices() self.edges = self._build_edges() for edge in self.edges: edge.source.add_edge(edge) @@ -43,12 +64,12 @@ class Graph: llm_node = None for node in self.nodes: node._build_params() - if isinstance(node, LLMNode): + if isinstance(node, LLMVertex): llm_node = node if llm_node: for node in self.nodes: - if isinstance(node, ToolkitNode): + if isinstance(node, ToolkitVertex): node.params["llm"] = llm_node def _remove_invalid_nodes(self) -> None: @@ -60,23 +81,23 @@ class Graph: or (len(self.nodes) == 1 and len(self.edges) == 0) ] - def _validate_node(self, node: Node) -> bool: + def _validate_node(self, node: Vertex) -> bool: """Validates a node.""" # All nodes that do not have edges are invalid return len(node.edges) > 0 - def get_node(self, node_id: str) -> Union[None, Node]: + def get_node(self, node_id: str) -> Union[None, Vertex]: """Returns a node by id.""" return next((node for node in self.nodes if node.id == node_id), None) - def get_nodes_with_target(self, node: Node) -> List[Node]: + def get_nodes_with_target(self, node: Vertex) -> List[Vertex]: """Returns the nodes connected to a node.""" - connected_nodes: List[Node] = [ + connected_nodes: List[Vertex] = [ edge.source for edge in self.edges if edge.target == node ] return connected_nodes - def build(self) -> List[Node]: + def build(self) -> List[Vertex]: """Builds the graph.""" # Get root node root_node = payload.get_root_node(self) @@ -84,9 +105,9 @@ class Graph: raise ValueError("No root node found") return root_node.build() - def get_node_neighbors(self, node: Node) -> Dict[Node, int]: + def get_node_neighbors(self, node: Vertex) -> 
Dict[Vertex, int]: """Returns the neighbors of a node.""" - neighbors: Dict[Node, int] = {} + neighbors: Dict[Vertex, int] = {} for edge in self.edges: if edge.source == node: neighbor = edge.target @@ -117,28 +138,30 @@ class Graph: edges.append(Edge(source, target)) return edges - def _get_node_class(self, node_type: str, node_lc_type: str) -> Type[Node]: + def _get_vertex_class(self, node_type: str, node_lc_type: str) -> Type[Vertex]: """Returns the node class based on the node type.""" if node_type in FILE_TOOLS: - return FileToolNode - if node_type in NODE_TYPE_MAP: - return NODE_TYPE_MAP[node_type] - return NODE_TYPE_MAP[node_lc_type] if node_lc_type in NODE_TYPE_MAP else Node + return FileToolVertex + if node_type in VERTEX_TYPE_MAP: + return VERTEX_TYPE_MAP[node_type] + return ( + VERTEX_TYPE_MAP[node_lc_type] if node_lc_type in VERTEX_TYPE_MAP else Vertex + ) - def _build_nodes(self) -> List[Node]: - """Builds the nodes of the graph.""" - nodes: List[Node] = [] + def _build_vertices(self) -> List[Vertex]: + """Builds the vertices of the graph.""" + nodes: List[Vertex] = [] for node in self._nodes: node_data = node["data"] node_type: str = node_data["type"] # type: ignore node_lc_type: str = node_data["node"]["template"]["_type"] # type: ignore - NodeClass = self._get_node_class(node_type, node_lc_type) - nodes.append(NodeClass(node)) + VertexClass = self._get_vertex_class(node_type, node_lc_type) + nodes.append(VertexClass(node)) return nodes - def get_children_by_node_type(self, node: Node, node_type: str) -> List[Node]: + def get_children_by_node_type(self, node: Vertex, node_type: str) -> List[Vertex]: """Returns the children of a node based on the node type.""" children = [] node_types = [node.data["type"]] diff --git a/src/backend/langflow/graph/graph/constants.py b/src/backend/langflow/graph/graph/constants.py index f5bc9b8e3..ff1317d39 100644 --- a/src/backend/langflow/graph/graph/constants.py +++ b/src/backend/langflow/graph/graph/constants.py @@ 
-1,17 +1,17 @@ -from langflow.graph.node.base import Node -from langflow.graph.node.types import ( - AgentNode, - ChainNode, - DocumentLoaderNode, - EmbeddingNode, - LLMNode, - MemoryNode, - PromptNode, - TextSplitterNode, - ToolNode, - ToolkitNode, - VectorStoreNode, - WrapperNode, +from langflow.graph.vertex.base import Vertex +from langflow.graph.vertex.types import ( + AgentVertex, + ChainVertex, + DocumentLoaderVertex, + EmbeddingVertex, + LLMVertex, + MemoryVertex, + PromptVertex, + TextSplitterVertex, + ToolVertex, + ToolkitVertex, + VectorStoreVertex, + WrapperVertex, ) from langflow.interface.agents.base import agent_creator from langflow.interface.chains.base import chain_creator @@ -33,17 +33,17 @@ from typing import Dict, Type DIRECT_TYPES = ["str", "bool", "code", "int", "float", "Any", "prompt"] -NODE_TYPE_MAP: Dict[str, Type[Node]] = { - **{t: PromptNode for t in prompt_creator.to_list()}, - **{t: AgentNode for t in agent_creator.to_list()}, - **{t: ChainNode for t in chain_creator.to_list()}, - **{t: ToolNode for t in tool_creator.to_list()}, - **{t: ToolkitNode for t in toolkits_creator.to_list()}, - **{t: WrapperNode for t in wrapper_creator.to_list()}, - **{t: LLMNode for t in llm_creator.to_list()}, - **{t: MemoryNode for t in memory_creator.to_list()}, - **{t: EmbeddingNode for t in embedding_creator.to_list()}, - **{t: VectorStoreNode for t in vectorstore_creator.to_list()}, - **{t: DocumentLoaderNode for t in documentloader_creator.to_list()}, - **{t: TextSplitterNode for t in textsplitter_creator.to_list()}, +VERTEX_TYPE_MAP: Dict[str, Type[Vertex]] = { + **{t: PromptVertex for t in prompt_creator.to_list()}, + **{t: AgentVertex for t in agent_creator.to_list()}, + **{t: ChainVertex for t in chain_creator.to_list()}, + **{t: ToolVertex for t in tool_creator.to_list()}, + **{t: ToolkitVertex for t in toolkits_creator.to_list()}, + **{t: WrapperVertex for t in wrapper_creator.to_list()}, + **{t: LLMVertex for t in llm_creator.to_list()}, + 
**{t: MemoryVertex for t in memory_creator.to_list()}, + **{t: EmbeddingVertex for t in embedding_creator.to_list()}, + **{t: VectorStoreVertex for t in vectorstore_creator.to_list()}, + **{t: DocumentLoaderVertex for t in documentloader_creator.to_list()}, + **{t: TextSplitterVertex for t in textsplitter_creator.to_list()}, } diff --git a/src/backend/langflow/graph/graph/utils.py b/src/backend/langflow/graph/graph/utils.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/graph/utils.py b/src/backend/langflow/graph/utils.py index 6d56e933e..b78b2f961 100644 --- a/src/backend/langflow/graph/utils.py +++ b/src/backend/langflow/graph/utils.py @@ -1,4 +1,6 @@ -import re +from typing import Any, Union + +from langflow.interface.utils import extract_input_variables_from_prompt def validate_prompt(prompt: str): @@ -14,6 +16,12 @@ def fix_prompt(prompt: str): return prompt + " {input}" -def extract_input_variables_from_prompt(prompt: str) -> list[str]: - """Extract input variables from prompt.""" - return re.findall(r"{(.*?)}", prompt) +def flatten_list(list_of_lists: list[Union[list, Any]]) -> list: + """Flatten list of lists.""" + new_list = [] + for item in list_of_lists: + if isinstance(item, list): + new_list.extend(item) + else: + new_list.append(item) + return new_list diff --git a/src/backend/langflow/graph/vertex/__init__.py b/src/backend/langflow/graph/vertex/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/graph/node/base.py b/src/backend/langflow/graph/vertex/base.py similarity index 89% rename from src/backend/langflow/graph/node/base.py rename to src/backend/langflow/graph/vertex/base.py index 5076deb9c..04dadab85 100644 --- a/src/backend/langflow/graph/node/base.py +++ b/src/backend/langflow/graph/vertex/base.py @@ -1,5 +1,5 @@ from langflow.cache import base as cache_utils -from langflow.graph.node.constants import DIRECT_TYPES +from langflow.graph.vertex.constants import 
DIRECT_TYPES from langflow.interface import loading from langflow.interface.listing import ALL_TYPES_DICT from langflow.utils.logger import logger @@ -17,7 +17,7 @@ if TYPE_CHECKING: from langflow.graph.edge.base import Edge -class Node: +class Vertex: def __init__(self, data: Dict, base_type: Optional[str] = None) -> None: self.id: str = data["id"] self._data = data @@ -48,12 +48,12 @@ class Node: ] template_dict = self.data["node"]["template"] - self.node_type = ( + self.vertex_type = ( self.data["type"] if "Tool" not in self.output else template_dict["_type"] ) if self.base_type is None: for base_type, value in ALL_TYPES_DICT.items(): - if self.node_type in value: + if self.vertex_type in value: self.base_type = base_type break @@ -113,7 +113,7 @@ class Node: if value["required"] and not edges: # If a required parameter is not found, raise an error raise ValueError( - f"Required input {key} for module {self.node_type} not found" + f"Required input {key} for module {self.vertex_type} not found" ) elif value["list"]: # If this is a list parameter, append all sources to a list @@ -128,7 +128,7 @@ class Node: # so we need to check if value has value new_value = value.get("value") if new_value is None: - warnings.warn(f"Value for {key} in {self.node_type} is None. ") + warnings.warn(f"Value for {key} in {self.vertex_type} is None. 
") if value.get("type") == "int": with contextlib.suppress(TypeError, ValueError): new_value = int(new_value) # type: ignore @@ -148,12 +148,12 @@ class Node: # and continue # Another aspect is that the node_type is the class that we need to import # and instantiate with these built params - logger.debug(f"Building {self.node_type}") + logger.debug(f"Building {self.vertex_type}") # Build each node in the params dict for key, value in self.params.copy().items(): # Check if Node or list of Nodes and not self # to avoid recursion - if isinstance(value, Node): + if isinstance(value, Vertex): if value == self: del self.params[key] continue @@ -174,10 +174,16 @@ class Node: # turn result which is a function into a coroutine # so that it can be awaited self.params["coroutine"] = sync_to_async(result) + if isinstance(result, list): + # If the result is a list, then we need to extend the list + # with the result but first check if the key exists + # if it doesn't, then we need to create a new list + if isinstance(self.params[key], list): + self.params[key].extend(result) self.params[key] = result elif isinstance(value, list) and all( - isinstance(node, Node) for node in value + isinstance(node, Vertex) for node in value ): self.params[key] = [] for node in value: @@ -193,17 +199,17 @@ class Node: try: self._built_object = loading.instantiate_class( - node_type=self.node_type, + node_type=self.vertex_type, base_type=self.base_type, params=self.params, ) except Exception as exc: raise ValueError( - f"Error building node {self.node_type}: {str(exc)}" + f"Error building node {self.vertex_type}: {str(exc)}" ) from exc if self._built_object is None: - raise ValueError(f"Node type {self.node_type} not found") + raise ValueError(f"Node type {self.vertex_type} not found") self._built = True @@ -220,7 +226,7 @@ class Node: return f"Node(id={self.id}, data={self.data})" def __eq__(self, __o: object) -> bool: - return self.id == __o.id if isinstance(__o, Node) else False + return 
self.id == __o.id if isinstance(__o, Vertex) else False def __hash__(self) -> int: return id(self) diff --git a/src/backend/langflow/graph/node/constants.py b/src/backend/langflow/graph/vertex/constants.py similarity index 100% rename from src/backend/langflow/graph/node/constants.py rename to src/backend/langflow/graph/vertex/constants.py diff --git a/src/backend/langflow/graph/node/types.py b/src/backend/langflow/graph/vertex/types.py similarity index 72% rename from src/backend/langflow/graph/node/types.py rename to src/backend/langflow/graph/vertex/types.py index 9b25fd6ee..4eb20f416 100644 --- a/src/backend/langflow/graph/node/types.py +++ b/src/backend/langflow/graph/vertex/types.py @@ -1,22 +1,23 @@ from typing import Any, Dict, List, Optional, Union -from langflow.graph.node.base import Node -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.graph.vertex.base import Vertex +from langflow.graph.utils import flatten_list +from langflow.interface.utils import extract_input_variables_from_prompt -class AgentNode(Node): +class AgentVertex(Vertex): def __init__(self, data: Dict): super().__init__(data, base_type="agents") - self.tools: List[ToolNode] = [] - self.chains: List[ChainNode] = [] + self.tools: List[Union[ToolkitVertex, ToolVertex]] = [] + self.chains: List[ChainVertex] = [] def _set_tools_and_chains(self) -> None: for edge in self.edges: source_node = edge.source - if isinstance(source_node, ToolNode): + if isinstance(source_node, (ToolVertex, ToolkitVertex)): self.tools.append(source_node) - elif isinstance(source_node, ChainNode): + elif isinstance(source_node, ChainVertex): self.chains.append(source_node) def build(self, force: bool = False) -> Any: @@ -32,25 +33,130 @@ class AgentNode(Node): self._build() - #! 
 Cannot deepcopy VectorStore, VectorStoreRouter, or SQL agents - if self.node_type in ["VectorStoreAgent", "VectorStoreRouterAgent", "SQLAgent"]: - return self._built_object return self._built_object -class ToolNode(Node): +class ToolVertex(Vertex): def __init__(self, data: Dict): super().__init__(data, base_type="tools") -class PromptNode(Node): +class LLMVertex(Vertex): + built_node_type = None + class_built_object = None + + def __init__(self, data: Dict): + super().__init__(data, base_type="llms") + + def build(self, force: bool = False) -> Any: + # LLM is different because some models might take up too much memory + # or time to load. So we only load them when we need them. + if self.vertex_type == self.built_node_type: + return self.class_built_object + if not self._built or force: + self._build() + self.built_node_type = self.vertex_type + self.class_built_object = self._built_object + # Avoid deepcopying the LLM + # that are loaded from a file + return self._built_object + + +class ToolkitVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="toolkits") + + +class FileToolVertex(ToolVertex): + def __init__(self, data: Dict): + super().__init__(data) + + +class WrapperVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="wrappers") + + def build(self, force: bool = False) -> Any: + if not self._built or force: + if "headers" in self.params: + self.params["headers"] = eval(self.params["headers"]) + self._build() + return self._built_object + + +class DocumentLoaderVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="documentloaders") + + def _built_object_repr(self): + # This built_object is a list of documents. Maybe we should + # show how many documents are in the list? 
+ if self._built_object: + return f"""{self.vertex_type}({len(self._built_object)} documents) + Documents: {self._built_object[:3]}...""" + return f"{self.vertex_type}()" + + +class EmbeddingVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="embeddings") + + +class VectorStoreVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="vectorstores") + + def _built_object_repr(self): + return "Vector stores can take time to build. It will build on the first query." + + +class MemoryVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="memory") + + +class TextSplitterVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="textsplitters") + + def _built_object_repr(self): + # This built_object is a list of documents. Maybe we should + # show how many documents are in the list? + if self._built_object: + return f"""{self.vertex_type}({len(self._built_object)} documents) + \nDocuments: {self._built_object[:3]}...""" + return f"{self.vertex_type}()" + + +class ChainVertex(Vertex): + def __init__(self, data: Dict): + super().__init__(data, base_type="chains") + + def build( + self, + force: bool = False, + tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None, + ) -> Any: + if not self._built or force: + # Check if the chain requires a PromptVertex + for key, value in self.params.items(): + if isinstance(value, PromptVertex): + # Build the PromptVertex, passing the tools if available + self.params[key] = value.build(tools=tools, force=force) + + self._build() + + return self._built_object + + +class PromptVertex(Vertex): def __init__(self, data: Dict): super().__init__(data, base_type="prompts") def build( self, force: bool = False, - tools: Optional[Union[List[Node], List[ToolNode]]] = None, + tools: Optional[List[Union[ToolkitVertex, ToolVertex]]] = None, ) -> Any: if not self._built or force: if ( @@ -59,12 +165,16 @@ class 
PromptNode(Node): ): self.params["input_variables"] = [] # Check if it is a ZeroShotPrompt and needs a tool - if "ShotPrompt" in self.node_type: + if "ShotPrompt" in self.vertex_type: tools = ( [tool_node.build() for tool_node in tools] if tools is not None else [] ) + # flatten the list of tools if it is a list of lists + # first check if it is a list + if tools and isinstance(tools, list) and isinstance(tools[0], list): + tools = flatten_list(tools) self.params["tools"] = tools prompt_params = [ key @@ -81,113 +191,3 @@ class PromptNode(Node): self._build() return self._built_object - - -class ChainNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="chains") - - def build( - self, - force: bool = False, - tools: Optional[Union[List[Node], List[ToolNode]]] = None, - ) -> Any: - if not self._built or force: - # Check if the chain requires a PromptNode - for key, value in self.params.items(): - if isinstance(value, PromptNode): - # Build the PromptNode, passing the tools if available - self.params[key] = value.build(tools=tools, force=force) - - self._build() - - #! Cannot deepcopy SQLDatabaseChain - if self.node_type in ["SQLDatabaseChain"]: - return self._built_object - return self._built_object - - -class LLMNode(Node): - built_node_type = None - class_built_object = None - - def __init__(self, data: Dict): - super().__init__(data, base_type="llms") - - def build(self, force: bool = False) -> Any: - # LLM is different because some models might take up too much memory - # or time to load. 
So we only load them when we need them.ß - if self.node_type == self.built_node_type: - return self.class_built_object - if not self._built or force: - self._build() - self.built_node_type = self.node_type - self.class_built_object = self._built_object - # Avoid deepcopying the LLM - # that are loaded from a file - return self._built_object - - -class ToolkitNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="toolkits") - - -class FileToolNode(ToolNode): - def __init__(self, data: Dict): - super().__init__(data) - - -class WrapperNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="wrappers") - - def build(self, force: bool = False) -> Any: - if not self._built or force: - if "headers" in self.params: - self.params["headers"] = eval(self.params["headers"]) - self._build() - return self._built_object - - -class DocumentLoaderNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="documentloaders") - - def _built_object_repr(self): - # This built_object is a list of documents. Maybe we should - # show how many documents are in the list? - if self._built_object: - return f"""{self.node_type}({len(self._built_object)} documents) - Documents: {self._built_object[:3]}...""" - return f"{self.node_type}()" - - -class EmbeddingNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="embeddings") - - -class VectorStoreNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="vectorstores") - - def _built_object_repr(self): - return "Vector stores can take time to build. It will build on the first query." - - -class MemoryNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="memory") - - -class TextSplitterNode(Node): - def __init__(self, data: Dict): - super().__init__(data, base_type="textsplitters") - - def _built_object_repr(self): - # This built_object is a list of documents. 
Maybe we should - # show how many documents are in the list? - if self._built_object: - return f"""{self.node_type}({len(self._built_object)} documents)\nDocuments: {self._built_object[:3]}...""" - return f"{self.node_type}()" diff --git a/src/backend/langflow/interface/agents/custom.py b/src/backend/langflow/interface/agents/custom.py index 4654ef7cb..3aaa132d4 100644 --- a/src/backend/langflow/interface/agents/custom.py +++ b/src/backend/langflow/interface/agents/custom.py @@ -69,7 +69,7 @@ class JsonAgent(CustomAgentExecutor): @classmethod def from_toolkit_and_llm(cls, toolkit: JsonToolkit, llm: BaseLanguageModel): - tools = toolkit.get_tools() + tools = toolkit if isinstance(toolkit, list) else toolkit.get_tools() tool_names = {tool.name for tool in tools} prompt = ZeroShotAgent.create_prompt( tools, diff --git a/src/backend/langflow/interface/chains/custom.py b/src/backend/langflow/interface/chains/custom.py index cb76a53c8..ba4ba8b62 100644 --- a/src/backend/langflow/interface/chains/custom.py +++ b/src/backend/langflow/interface/chains/custom.py @@ -5,7 +5,7 @@ from langchain.memory.buffer import ConversationBufferMemory from langchain.schema import BaseMemory from pydantic import Field, root_validator -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.interface.utils import extract_input_variables_from_prompt DEFAULT_SUFFIX = """" Current conversation: diff --git a/src/backend/langflow/interface/custom_lists.py b/src/backend/langflow/interface/custom_lists.py index 0fea838b6..34bc0103e 100644 --- a/src/backend/langflow/interface/custom_lists.py +++ b/src/backend/langflow/interface/custom_lists.py @@ -11,12 +11,15 @@ from langchain import ( text_splitter, ) from langchain.agents import agent_toolkits -from langchain.chat_models import ChatOpenAI +from langchain.chat_models import AzureChatOpenAI, ChatOpenAI +from langchain.chat_models import ChatAnthropic from langflow.interface.importing.utils import import_class ## LLMs 
llm_type_to_cls_dict = llms.type_to_cls_dict +llm_type_to_cls_dict["anthropic-chat"] = ChatAnthropic # type: ignore +llm_type_to_cls_dict["azure-chat"] = AzureChatOpenAI # type: ignore llm_type_to_cls_dict["openai-chat"] = ChatOpenAI # type: ignore ## Chains diff --git a/src/backend/langflow/interface/importing/utils.py b/src/backend/langflow/interface/importing/utils.py index d08e52999..f65376d48 100644 --- a/src/backend/langflow/interface/importing/utils.py +++ b/src/backend/langflow/interface/importing/utils.py @@ -9,6 +9,7 @@ from langchain.base_language import BaseLanguageModel from langchain.chains.base import Chain from langchain.chat_models.base import BaseChatModel from langchain.tools import BaseTool +from langflow.utils import validate def import_module(module_path: str) -> Any: @@ -147,3 +148,10 @@ def import_utility(utility: str) -> Any: if utility == "SQLDatabase": return import_class(f"langchain.sql_database.{utility}") return import_class(f"langchain.utilities.{utility}") + + +def get_function(code): + """Get the function""" + function_name = validate.extract_function_name(code) + + return validate.create_function(code, function_name) diff --git a/src/backend/langflow/interface/loading.py b/src/backend/langflow/interface/loading.py index a3799be16..eb4623f5a 100644 --- a/src/backend/langflow/interface/loading.py +++ b/src/backend/langflow/interface/loading.py @@ -12,7 +12,6 @@ from langchain.agents.load_tools import ( _LLM_TOOLS, ) from langchain.agents.loading import load_agent_from_config -from langflow.graph import Graph from langchain.agents.tools import Tool from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager @@ -21,12 +20,11 @@ from langchain.llms.loading import load_llm_from_config from pydantic import ValidationError from langflow.interface.agents.custom import CUSTOM_AGENTS -from langflow.interface.importing.utils import import_by_type -from langflow.interface.run import 
fix_memory_inputs +from langflow.interface.importing.utils import get_function, import_by_type from langflow.interface.toolkits.base import toolkits_creator from langflow.interface.types import get_type_list from langflow.interface.utils import load_file_into_dict -from langflow.utils import util, validate +from langflow.utils import util def instantiate_class(node_type: str, base_type: str, params: Dict) -> Any: @@ -100,11 +98,9 @@ def instantiate_tool(node_type, class_object, params): if node_type == "JsonSpec": params["dict_"] = load_file_into_dict(params.pop("path")) return class_object(**params) - elif node_type == "PythonFunction": - function_string = params["code"] - if isinstance(function_string, str): - return validate.eval_function(function_string) - raise ValueError("Function should be a string") + elif node_type == "PythonFunctionTool": + params["func"] = get_function(params.get("code")) + return class_object(**params) elif node_type.lower() == "tool": return class_object(**params) return class_object(**params) @@ -112,8 +108,11 @@ def instantiate_tool(node_type, class_object, params): def instantiate_toolkit(node_type, class_object, params): loaded_toolkit = class_object(**params) - if toolkits_creator.has_create_function(node_type): - return load_toolkits_executor(node_type, loaded_toolkit, params) + # Commenting this out for now to use toolkits as normal tools + # if toolkits_creator.has_create_function(node_type): + # return load_toolkits_executor(node_type, loaded_toolkit, params) + if isinstance(loaded_toolkit, BaseToolkit): + return loaded_toolkit.get_tools() return loaded_toolkit @@ -162,37 +161,6 @@ def instantiate_utility(node_type, class_object, params): return class_object(**params) -def load_flow_from_json(path: str, build=True): - """Load flow from json file""" - # This is done to avoid circular imports - - with open(path, "r", encoding="utf-8") as f: - flow_graph = json.load(f) - data_graph = flow_graph["data"] - nodes = 
data_graph["nodes"] - # Substitute ZeroShotPrompt with PromptTemplate - # nodes = replace_zero_shot_prompt_with_prompt_template(nodes) - # Add input variables - # nodes = payload.extract_input_variables(nodes) - - # Nodes, edges and root node - edges = data_graph["edges"] - graph = Graph(nodes, edges) - if build: - langchain_object = graph.build() - if hasattr(langchain_object, "verbose"): - langchain_object.verbose = True - - if hasattr(langchain_object, "return_intermediate_steps"): - # https://github.com/hwchase17/langchain/issues/2068 - # Deactivating until we have a frontend solution - # to display intermediate steps - langchain_object.return_intermediate_steps = False - fix_memory_inputs(langchain_object) - return langchain_object - return graph - - def replace_zero_shot_prompt_with_prompt_template(nodes): """Replace ZeroShotPrompt with PromptTemplate""" for node in nodes: diff --git a/src/backend/langflow/interface/prompts/custom.py b/src/backend/langflow/interface/prompts/custom.py index b1dbef370..286210271 100644 --- a/src/backend/langflow/interface/prompts/custom.py +++ b/src/backend/langflow/interface/prompts/custom.py @@ -3,7 +3,7 @@ from typing import Dict, List, Optional, Type from langchain.prompts import PromptTemplate from pydantic import root_validator -from langflow.graph.utils import extract_input_variables_from_prompt +from langflow.interface.utils import extract_input_variables_from_prompt # Steps to create a BaseCustomPrompt: # 1. 
Create a prompt template that endes with: diff --git a/src/backend/langflow/interface/run.py b/src/backend/langflow/interface/run.py index c2483416f..89f71fd8b 100644 --- a/src/backend/langflow/interface/run.py +++ b/src/backend/langflow/interface/run.py @@ -1,10 +1,3 @@ -import contextlib -import io -from typing import Any, Dict, List, Tuple - -from langchain.schema import AgentAction - -from langflow.api.callback import AsyncStreamingLLMCallbackHandler, StreamingLLMCallbackHandler # type: ignore from langflow.cache.base import compute_dict_hash, load_cache, memoize_dict from langflow.graph import Graph from langflow.utils.logger import logger @@ -24,15 +17,6 @@ def load_langchain_object(data_graph, is_first_message=False): return computed_hash, langchain_object -def load_or_build_langchain_object(data_graph, is_first_message=False): - """ - Load langchain object from cache if it exists, otherwise build it. - """ - if is_first_message: - build_langchain_object_with_caching.clear_cache() - return build_langchain_object_with_caching(data_graph) - - @memoize_dict(maxsize=10) def build_langchain_object_with_caching(data_graph): """ @@ -40,16 +24,10 @@ def build_langchain_object_with_caching(data_graph): """ logger.debug("Building langchain object") - graph = build_graph(data_graph) + graph = Graph.from_payload(data_graph) return graph.build() -def build_graph(data_graph): - nodes = data_graph["nodes"] - edges = data_graph["edges"] - return Graph(nodes, edges) - - def build_langchain_object(data_graph): """ Build langchain object from data_graph. @@ -66,29 +44,6 @@ def build_langchain_object(data_graph): return graph.build() -def process_graph_cached(data_graph: Dict[str, Any], message: str): - """ - Process graph by extracting input variables and replacing ZeroShotPrompt - with PromptTemplate,then run the graph and return the result and thought. 
- """ - # Load langchain object - is_first_message = len(data_graph.get("chatHistory", [])) == 0 - langchain_object = load_or_build_langchain_object(data_graph, is_first_message) - logger.debug("Loaded langchain object") - - if langchain_object is None: - # Raise user facing error - raise ValueError( - "There was an error loading the langchain_object. Please, check all the nodes and try again." - ) - - # Generate result and thought - logger.debug("Generating result and thought") - result, thought = get_result_and_thought(langchain_object, message) - logger.debug("Generated result and thought") - return {"result": str(result), "thought": thought.strip()} - - def get_memory_key(langchain_object): """ Given a LangChain object, this function retrieves the current memory key from the object's memory attribute. @@ -124,147 +79,3 @@ def update_memory_keys(langchain_object, possible_new_mem_key): langchain_object.memory.input_key = input_key langchain_object.memory.output_key = output_key langchain_object.memory.memory_key = possible_new_mem_key - - -def fix_memory_inputs(langchain_object): - """ - Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the - object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the - get_memory_key function and updates the memory keys using the update_memory_keys function. 
- """ - if hasattr(langchain_object, "memory") and langchain_object.memory is not None: - try: - if langchain_object.memory.memory_key in langchain_object.input_variables: - return - except AttributeError: - input_variables = ( - langchain_object.prompt.input_variables - if hasattr(langchain_object, "prompt") - else langchain_object.input_keys - ) - if langchain_object.memory.memory_key in input_variables: - return - - possible_new_mem_key = get_memory_key(langchain_object) - if possible_new_mem_key is not None: - update_memory_keys(langchain_object, possible_new_mem_key) - - -async def get_result_and_steps(langchain_object, message: str, **kwargs): - """Get result and thought from extracted json""" - - try: - if hasattr(langchain_object, "verbose"): - langchain_object.verbose = True - chat_input = None - memory_key = "" - if hasattr(langchain_object, "memory") and langchain_object.memory is not None: - memory_key = langchain_object.memory.memory_key - - if hasattr(langchain_object, "input_keys"): - for key in langchain_object.input_keys: - if key not in [memory_key, "chat_history"]: - chat_input = {key: message} - else: - chat_input = message # type: ignore - - if hasattr(langchain_object, "return_intermediate_steps"): - # https://github.com/hwchase17/langchain/issues/2068 - # Deactivating until we have a frontend solution - # to display intermediate steps - langchain_object.return_intermediate_steps = True - - fix_memory_inputs(langchain_object) - try: - async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)] - output = await langchain_object.acall(chat_input, callbacks=async_callbacks) - except Exception as exc: - # make the error message more informative - logger.debug(f"Error: {str(exc)}") - sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)] - output = langchain_object(chat_input, callbacks=sync_callbacks) - - intermediate_steps = ( - output.get("intermediate_steps", []) if isinstance(output, dict) else [] - ) - - result = ( - 
output.get(langchain_object.output_keys[0]) - if isinstance(output, dict) - else output - ) - thought = format_actions(intermediate_steps) if intermediate_steps else "" - except Exception as exc: - raise ValueError(f"Error: {str(exc)}") from exc - return result, thought - - -def get_result_and_thought(langchain_object, message: str): - """Get result and thought from extracted json""" - try: - if hasattr(langchain_object, "verbose"): - langchain_object.verbose = True - chat_input = None - memory_key = "" - if hasattr(langchain_object, "memory") and langchain_object.memory is not None: - memory_key = langchain_object.memory.memory_key - - if hasattr(langchain_object, "input_keys"): - for key in langchain_object.input_keys: - if key not in [memory_key, "chat_history"]: - chat_input = {key: message} - else: - chat_input = message # type: ignore - - if hasattr(langchain_object, "return_intermediate_steps"): - # https://github.com/hwchase17/langchain/issues/2068 - # Deactivating until we have a frontend solution - # to display intermediate steps - langchain_object.return_intermediate_steps = False - - fix_memory_inputs(langchain_object) - - with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer): - try: - # if hasattr(langchain_object, "acall"): - # output = await langchain_object.acall(chat_input) - # else: - output = langchain_object(chat_input) - except ValueError as exc: - # make the error message more informative - logger.debug(f"Error: {str(exc)}") - output = langchain_object.run(chat_input) - - intermediate_steps = ( - output.get("intermediate_steps", []) if isinstance(output, dict) else [] - ) - - result = ( - output.get(langchain_object.output_keys[0]) - if isinstance(output, dict) - else output - ) - if intermediate_steps: - thought = format_actions(intermediate_steps) - else: - thought = output_buffer.getvalue() - - except Exception as exc: - raise ValueError(f"Error: {str(exc)}") from exc - return result, thought - - -def 
format_actions(actions: List[Tuple[AgentAction, str]]) -> str: - """Format a list of (AgentAction, answer) tuples into a string.""" - output = [] - for action, answer in actions: - log = action.log - tool = action.tool - tool_input = action.tool_input - output.append(f"Log: {log}") - if "Action" not in log and "Action Input" not in log: - output.append(f"Tool: {tool}") - output.append(f"Tool Input: {tool_input}") - output.append(f"Answer: {answer}") - output.append("") # Add a blank line - return "\n".join(output) diff --git a/src/backend/langflow/interface/toolkits/base.py b/src/backend/langflow/interface/toolkits/base.py index cbe625f0d..be2345c02 100644 --- a/src/backend/langflow/interface/toolkits/base.py +++ b/src/backend/langflow/interface/toolkits/base.py @@ -42,24 +42,27 @@ class ToolkitCreator(LangChainTypeCreator): def get_signature(self, name: str) -> Optional[Dict]: try: - return build_template_from_class(name, self.type_to_loader_dict) + template = build_template_from_class(name, self.type_to_loader_dict) + # add Tool to base_classes + if "toolkit" in name.lower() and template: + template["base_classes"].append("Tool") + return template except ValueError as exc: - raise ValueError("Prompt not found") from exc + raise ValueError("Toolkit not found") from exc except AttributeError as exc: - logger.error(f"Prompt {name} not loaded: {exc}") + logger.error(f"Toolkit {name} not loaded: {exc}") return None def to_list(self) -> List[str]: return list(self.type_to_loader_dict.keys()) def get_create_function(self, name: str) -> Callable: - if loader_name := self.create_functions.get(name, None): - # import loader + if loader_name := self.create_functions.get(name): return import_module( f"from langchain.agents.agent_toolkits import {loader_name[0]}" ) else: - raise ValueError("Loader not found") + raise ValueError("Toolkit not found") def has_create_function(self, name: str) -> bool: # check if the function list is not empty diff --git 
a/src/backend/langflow/interface/tools/base.py b/src/backend/langflow/interface/tools/base.py index a8e7045c0..d6b114e4c 100644 --- a/src/backend/langflow/interface/tools/base.py +++ b/src/backend/langflow/interface/tools/base.py @@ -71,7 +71,8 @@ class ToolCreator(LangChainTypeCreator): for tool, tool_fcn in ALL_TOOLS_NAMES.items(): tool_params = get_tool_params(tool_fcn) - tool_name = tool_params.get("name", tool) + + tool_name = tool_params.get("name") or tool if tool_name in settings.tools or settings.dev: if tool_name == "JsonSpec": diff --git a/src/backend/langflow/interface/tools/constants.py b/src/backend/langflow/interface/tools/constants.py index f939d55ad..31c75ec08 100644 --- a/src/backend/langflow/interface/tools/constants.py +++ b/src/backend/langflow/interface/tools/constants.py @@ -9,10 +9,10 @@ from langchain.agents.load_tools import ( from langchain.tools.json.tool import JsonSpec from langflow.interface.importing.utils import import_class -from langflow.interface.tools.custom import PythonFunction +from langflow.interface.tools.custom import PythonFunctionTool FILE_TOOLS = {"JsonSpec": JsonSpec} -CUSTOM_TOOLS = {"Tool": Tool, "PythonFunction": PythonFunction} +CUSTOM_TOOLS = {"Tool": Tool, "PythonFunctionTool": PythonFunctionTool} OTHER_TOOLS = {tool: import_class(f"langchain.tools.{tool}") for tool in tools.__all__} diff --git a/src/backend/langflow/interface/tools/custom.py b/src/backend/langflow/interface/tools/custom.py index 4c641f388..b2d43565d 100644 --- a/src/backend/langflow/interface/tools/custom.py +++ b/src/backend/langflow/interface/tools/custom.py @@ -1,13 +1,14 @@ -from typing import Callable, Optional +from typing import Optional +from langflow.interface.importing.utils import get_function from pydantic import BaseModel, validator from langflow.utils import validate +from langchain.agents.tools import Tool class Function(BaseModel): code: str - function: Optional[Callable] = None imports: Optional[str] = None # Eval code and store 
the function @@ -24,14 +25,17 @@ class Function(BaseModel): return v - def get_function(self): - """Get the function""" - function_name = validate.extract_function_name(self.code) - return validate.create_function(self.code, function_name) - - -class PythonFunction(Function): +class PythonFunctionTool(Function, Tool): """Python function""" + name: str = "Custom Tool" + description: str code: str + + def ___init__(self, name: str, description: str, code: str): + self.name = name + self.description = description + self.code = code + self.func = get_function(self.code) + super().__init__(name=name, description=description, func=self.func) diff --git a/src/backend/langflow/interface/utils.py b/src/backend/langflow/interface/utils.py index 2b7c5acd1..32c605654 100644 --- a/src/backend/langflow/interface/utils.py +++ b/src/backend/langflow/interface/utils.py @@ -2,6 +2,7 @@ import base64 import json import os from io import BytesIO +import re import yaml from langchain.base_language import BaseLanguageModel @@ -48,3 +49,8 @@ def try_setting_streaming_options(langchain_object, websocket): llm.streaming = True return langchain_object + + +def extract_input_variables_from_prompt(prompt: str) -> list[str]: + """Extract input variables from prompt.""" + return re.findall(r"{(.*?)}", prompt) diff --git a/src/backend/langflow/main.py b/src/backend/langflow/main.py index 56cc32e46..de39d8750 100644 --- a/src/backend/langflow/main.py +++ b/src/backend/langflow/main.py @@ -1,9 +1,7 @@ from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware -from langflow.api.chat import router as chat_router -from langflow.api.endpoints import router as endpoints_router -from langflow.api.validate import router as validate_router +from langflow.api import router def create_app(): @@ -14,6 +12,10 @@ def create_app(): "*", ] + @app.get("/health") + def get_health(): + return {"status": "OK"} + app.add_middleware( CORSMiddleware, allow_origins=origins, @@ -22,9 +24,7 @@ def 
create_app(): allow_headers=["*"], ) - app.include_router(endpoints_router) - app.include_router(validate_router) - app.include_router(chat_router) + app.include_router(router) return app diff --git a/src/backend/langflow/processing/__init__.py b/src/backend/langflow/processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/backend/langflow/processing/base.py b/src/backend/langflow/processing/base.py new file mode 100644 index 000000000..97b0d5be0 --- /dev/null +++ b/src/backend/langflow/processing/base.py @@ -0,0 +1,55 @@ +from langflow.api.v1.callback import ( + AsyncStreamingLLMCallbackHandler, + StreamingLLMCallbackHandler, +) +from langflow.processing.process import fix_memory_inputs, format_actions +from langflow.utils.logger import logger + + +async def get_result_and_steps(langchain_object, message: str, **kwargs): + """Get result and thought from extracted json""" + + try: + if hasattr(langchain_object, "verbose"): + langchain_object.verbose = True + chat_input = None + memory_key = "" + if hasattr(langchain_object, "memory") and langchain_object.memory is not None: + memory_key = langchain_object.memory.memory_key + + if hasattr(langchain_object, "input_keys"): + for key in langchain_object.input_keys: + if key not in [memory_key, "chat_history"]: + chat_input = {key: message} + else: + chat_input = message # type: ignore + + if hasattr(langchain_object, "return_intermediate_steps"): + # https://github.com/hwchase17/langchain/issues/2068 + # Deactivating until we have a frontend solution + # to display intermediate steps + langchain_object.return_intermediate_steps = True + + fix_memory_inputs(langchain_object) + try: + async_callbacks = [AsyncStreamingLLMCallbackHandler(**kwargs)] + output = await langchain_object.acall(chat_input, callbacks=async_callbacks) + except Exception as exc: + # make the error message more informative + logger.debug(f"Error: {str(exc)}") + sync_callbacks = [StreamingLLMCallbackHandler(**kwargs)] + 
output = langchain_object(chat_input, callbacks=sync_callbacks) + + intermediate_steps = ( + output.get("intermediate_steps", []) if isinstance(output, dict) else [] + ) + + result = ( + output.get(langchain_object.output_keys[0]) + if isinstance(output, dict) + else output + ) + thought = format_actions(intermediate_steps) if intermediate_steps else "" + except Exception as exc: + raise ValueError(f"Error: {str(exc)}") from exc + return result, thought diff --git a/src/backend/langflow/processing/process.py b/src/backend/langflow/processing/process.py new file mode 100644 index 000000000..3b8852e00 --- /dev/null +++ b/src/backend/langflow/processing/process.py @@ -0,0 +1,172 @@ +import contextlib +import io +from langchain.schema import AgentAction +import json +from langflow.interface.run import ( + build_langchain_object_with_caching, + get_memory_key, + update_memory_keys, +) +from langflow.utils.logger import logger +from langflow.graph import Graph + + +from typing import Any, Dict, List, Tuple + + +def fix_memory_inputs(langchain_object): + """ + Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the + object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the + get_memory_key function and updates the memory keys using the update_memory_keys function. 
+ """ + if hasattr(langchain_object, "memory") and langchain_object.memory is not None: + try: + if langchain_object.memory.memory_key in langchain_object.input_variables: + return + except AttributeError: + input_variables = ( + langchain_object.prompt.input_variables + if hasattr(langchain_object, "prompt") + else langchain_object.input_keys + ) + if langchain_object.memory.memory_key in input_variables: + return + + possible_new_mem_key = get_memory_key(langchain_object) + if possible_new_mem_key is not None: + update_memory_keys(langchain_object, possible_new_mem_key) + + +def format_actions(actions: List[Tuple[AgentAction, str]]) -> str: + """Format a list of (AgentAction, answer) tuples into a string.""" + output = [] + for action, answer in actions: + log = action.log + tool = action.tool + tool_input = action.tool_input + output.append(f"Log: {log}") + if "Action" not in log and "Action Input" not in log: + output.append(f"Tool: {tool}") + output.append(f"Tool Input: {tool_input}") + output.append(f"Answer: {answer}") + output.append("") # Add a blank line + return "\n".join(output) + + +def get_result_and_thought(langchain_object, message: str): + """Get result and thought from extracted json""" + try: + if hasattr(langchain_object, "verbose"): + langchain_object.verbose = True + chat_input = None + memory_key = "" + if hasattr(langchain_object, "memory") and langchain_object.memory is not None: + memory_key = langchain_object.memory.memory_key + + if hasattr(langchain_object, "input_keys"): + for key in langchain_object.input_keys: + if key not in [memory_key, "chat_history"]: + chat_input = {key: message} + else: + chat_input = message # type: ignore + + if hasattr(langchain_object, "return_intermediate_steps"): + # https://github.com/hwchase17/langchain/issues/2068 + # Deactivating until we have a frontend solution + # to display intermediate steps + langchain_object.return_intermediate_steps = False + + fix_memory_inputs(langchain_object) + + with 
io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer): + try: + # if hasattr(langchain_object, "acall"): + # output = await langchain_object.acall(chat_input) + # else: + output = langchain_object(chat_input) + except ValueError as exc: + # make the error message more informative + logger.debug(f"Error: {str(exc)}") + output = langchain_object.run(chat_input) + + intermediate_steps = ( + output.get("intermediate_steps", []) if isinstance(output, dict) else [] + ) + + result = ( + output.get(langchain_object.output_keys[0]) + if isinstance(output, dict) + else output + ) + if intermediate_steps: + thought = format_actions(intermediate_steps) + else: + thought = output_buffer.getvalue() + + except Exception as exc: + raise ValueError(f"Error: {str(exc)}") from exc + return result, thought + + +def load_or_build_langchain_object(data_graph, is_first_message=False): + """ + Load langchain object from cache if it exists, otherwise build it. + """ + if is_first_message: + build_langchain_object_with_caching.clear_cache() + return build_langchain_object_with_caching(data_graph) + + +def process_graph_cached(data_graph: Dict[str, Any], message: str): + """ + Process graph by extracting input variables and replacing ZeroShotPrompt + with PromptTemplate,then run the graph and return the result and thought. + """ + # Load langchain object + is_first_message = len(data_graph.get("chatHistory", [])) == 0 + langchain_object = load_or_build_langchain_object(data_graph, is_first_message) + logger.debug("Loaded langchain object") + + if langchain_object is None: + # Raise user facing error + raise ValueError( + "There was an error loading the langchain_object. Please, check all the nodes and try again." 
+ ) + + # Generate result and thought + logger.debug("Generating result and thought") + result, thought = get_result_and_thought(langchain_object, message) + logger.debug("Generated result and thought") + return {"result": str(result), "thought": thought.strip()} + + +def load_flow_from_json(path: str, build=True): + """Load flow from json file""" + # This is done to avoid circular imports + + with open(path, "r", encoding="utf-8") as f: + flow_graph = json.load(f) + data_graph = flow_graph["data"] + nodes = data_graph["nodes"] + # Substitute ZeroShotPrompt with PromptTemplate + # nodes = replace_zero_shot_prompt_with_prompt_template(nodes) + # Add input variables + # nodes = payload.extract_input_variables(nodes) + + # Nodes, edges and root node + edges = data_graph["edges"] + graph = Graph(nodes, edges) + if build: + langchain_object = graph.build() + if hasattr(langchain_object, "verbose"): + langchain_object.verbose = True + + if hasattr(langchain_object, "return_intermediate_steps"): + # https://github.com/hwchase17/langchain/issues/2068 + # Deactivating until we have a frontend solution + # to display intermediate steps + langchain_object.return_intermediate_steps = False + fix_memory_inputs(langchain_object) + return langchain_object + return graph diff --git a/src/backend/langflow/template/frontend_node/agents.py b/src/backend/langflow/template/frontend_node/agents.py index e4fe40187..451dd7eca 100644 --- a/src/backend/langflow/template/frontend_node/agents.py +++ b/src/backend/langflow/template/frontend_node/agents.py @@ -146,7 +146,7 @@ class CSVAgentNode(FrontendNode): ), ], ) - description: str = """Construct a json agent from a CSV and tools.""" + description: str = """Construct a CSV agent from a CSV and tools.""" base_classes: list[str] = ["AgentExecutor"] def to_dict(self): @@ -194,7 +194,7 @@ class InitializeAgentNode(FrontendNode): ), ], ) - description: str = """Construct a json agent from an LLM and tools.""" + description: str = """Construct a 
zero shot agent from an LLM and tools.""" base_classes: list[str] = ["AgentExecutor", "function"] def to_dict(self): diff --git a/src/backend/langflow/template/frontend_node/base.py b/src/backend/langflow/template/frontend_node/base.py index a64195813..6d00cead0 100644 --- a/src/backend/langflow/template/frontend_node/base.py +++ b/src/backend/langflow/template/frontend_node/base.py @@ -117,14 +117,30 @@ class FrontendNode(BaseModel): ) -> None: """Handles specific field values for certain fields.""" if key == "headers": - field.value = """{'Authorization': - 'Bearer '}""" - if name == "OpenAI" and key == "model_name": - field.options = constants.OPENAI_MODELS - field.is_list = True - elif name == "ChatOpenAI" and key == "model_name": - field.options = constants.CHAT_OPENAI_MODELS + field.value = """{'Authorization': 'Bearer '}""" + FrontendNode._handle_model_specific_field_values(field, key, name) + FrontendNode._handle_api_key_specific_field_values(field, key, name) + + @staticmethod + def _handle_model_specific_field_values( + field: TemplateField, key: str, name: Optional[str] = None + ) -> None: + """Handles specific field values related to models.""" + model_dict = { + "OpenAI": constants.OPENAI_MODELS, + "ChatOpenAI": constants.CHAT_OPENAI_MODELS, + "Anthropic": constants.ANTHROPIC_MODELS, + "ChatAnthropic": constants.ANTHROPIC_MODELS, + } + if name in model_dict and key == "model_name": + field.options = model_dict[name] field.is_list = True + + @staticmethod + def _handle_api_key_specific_field_values( + field: TemplateField, key: str, name: Optional[str] = None + ) -> None: + """Handles specific field values related to API keys.""" if "api_key" in key and "OpenAI" in str(name): field.display_name = "OpenAI API Key" field.required = False diff --git a/src/backend/langflow/template/frontend_node/llms.py b/src/backend/langflow/template/frontend_node/llms.py index 272770e2e..39e82422f 100644 --- a/src/backend/langflow/template/frontend_node/llms.py +++ 
b/src/backend/langflow/template/frontend_node/llms.py @@ -12,6 +12,18 @@ class LLMFrontendNode(FrontendNode): field.name.title().replace("Openai", "OpenAI").replace("_", " ") ).replace("Api", "API") + @staticmethod + def format_azure_field(field: TemplateField): + if field.name == "model_name": + field.show = False # Azure uses deployment_name instead of model_name. + if field.name == "openai_api_type": + field.show = False + field.password = False + field.value = "azure" + if field.name == "openai_api_version": + field.password = False + field.value = "2023-03-15-preview" + @staticmethod def format_field(field: TemplateField, name: Optional[str] = None) -> None: display_names_dict = { @@ -43,8 +55,16 @@ class LLMFrontendNode(FrontendNode): field.field_type = "code" field.advanced = True field.show = True - elif field.name in ["model_name", "temperature", "model_file", "model_type"]: + elif field.name in [ + "model_name", + "temperature", + "model_file", + "model_type", + "deployment_name", + ]: field.advanced = False field.show = True LLMFrontendNode.format_openai_field(field) + if "azure" in name.lower(): + LLMFrontendNode.format_azure_field(field) diff --git a/src/backend/langflow/template/frontend_node/tools.py b/src/backend/langflow/template/frontend_node/tools.py index 2819be4d9..4e97fec8c 100644 --- a/src/backend/langflow/template/frontend_node/tools.py +++ b/src/backend/langflow/template/frontend_node/tools.py @@ -59,11 +59,33 @@ class ToolNode(FrontendNode): return super().to_dict() -class PythonFunctionNode(FrontendNode): - name: str = "PythonFunction" +class PythonFunctionToolNode(FrontendNode): + name: str = "PythonFunctionTool" template: Template = Template( - type_name="python_function", + type_name="PythonFunctionTool", fields=[ + TemplateField( + field_type="str", + required=True, + placeholder="", + is_list=False, + show=True, + multiline=False, + value="", + name="name", + advanced=False, + ), + TemplateField( + field_type="str", + required=True, 
+ placeholder="", + is_list=False, + show=True, + multiline=False, + value="", + name="description", + advanced=False, + ), TemplateField( field_type="code", required=True, @@ -73,11 +95,11 @@ class PythonFunctionNode(FrontendNode): value=DEFAULT_PYTHON_FUNCTION, name="code", advanced=False, - ) + ), ], ) description: str = "Python function to be executed." - base_classes: list[str] = ["function"] + base_classes: list[str] = ["Tool"] def to_dict(self): return super().to_dict() diff --git a/src/backend/langflow/utils/constants.py b/src/backend/langflow/utils/constants.py index 2d101ab98..1b6bbdcc3 100644 --- a/src/backend/langflow/utils/constants.py +++ b/src/backend/langflow/utils/constants.py @@ -7,6 +7,20 @@ OPENAI_MODELS = [ ] CHAT_OPENAI_MODELS = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"] +ANTHROPIC_MODELS = [ + "claude-v1", # largest model, ideal for a wide range of more complex tasks. + "claude-v1-100k", # An enhanced version of claude-v1 with a 100,000 token (roughly 75,000 word) context window. + "claude-instant-v1", # A smaller model with far lower latency, sampling at roughly 40 words/sec! + "claude-instant-v1-100k", # Like claude-instant-v1 with a 100,000 token context window but retains its performance. + # Specific sub-versions of the above models: + "claude-v1.3", # Vs claude-v1.2: better instruction-following, code, and non-English dialogue and writing. + "claude-v1.3-100k", # An enhanced version of claude-v1.3 with a 100,000 token (roughly 75,000 word) context window. + "claude-v1.2", # Vs claude-v1.1: small adv in general helpfulness, instruction following, coding, and other tasks. + "claude-v1.0", # An earlier version of claude-v1. + "claude-instant-v1.1", # Latest version of claude-instant-v1. Better than claude-instant-v1.0 at most tasks. + "claude-instant-v1.1-100k", # Version of claude-instant-v1.1 with a 100K token context window. + "claude-instant-v1.0", # An earlier version of claude-instant-v1. 
+] DEFAULT_PYTHON_FUNCTION = """ def python_function(text: str) -> str: diff --git a/src/backend/langflow/utils/util.py b/src/backend/langflow/utils/util.py index 293d31154..f4e4927d8 100644 --- a/src/backend/langflow/utils/util.py +++ b/src/backend/langflow/utils/util.py @@ -302,7 +302,9 @@ def format_dict(d, name: Optional[str] = None): elif name == "ChatOpenAI" and key == "model_name": value["options"] = constants.CHAT_OPENAI_MODELS value["list"] = True - + elif (name == "Anthropic" or name == "ChatAnthropic") and key == "model_name": + value["options"] = constants.ANTHROPIC_MODELS + value["list"] = True return d diff --git a/src/frontend/package-lock.json b/src/frontend/package-lock.json index 374236168..9000c133f 100644 --- a/src/frontend/package-lock.json +++ b/src/frontend/package-lock.json @@ -13,6 +13,7 @@ "@headlessui/react": "^1.7.10", "@heroicons/react": "^2.0.15", "@mui/material": "^5.11.9", + "@radix-ui/react-tooltip": "^1.0.6", "@tabler/icons-react": "^2.18.0", "@tailwindcss/forms": "^0.5.3", "@tailwindcss/line-clamp": "^0.4.4", @@ -20,7 +21,10 @@ "ansi-to-html": "^0.7.2", "axios": "^1.3.2", "base64-js": "^1.5.1", + "class-variance-authority": "^0.6.0", + "clsx": "^1.2.1", "lodash": "^4.17.21", + "lucide-react": "^0.233.0", "react": "^18.2.0", "react-ace": "^10.1.0", "react-cookie": "^4.1.1", @@ -37,6 +41,8 @@ "rehype-mathjax": "^4.0.2", "remark-gfm": "^3.0.1", "remark-math": "^5.1.1", + "tailwind-merge": "^1.13.0", + "tailwindcss-animate": "^1.0.5", "uuid": "^9.0.0", "vite-plugin-svgr": "^3.2.0", "web-vitals": "^2.1.4" @@ -911,6 +917,18 @@ "@floating-ui/core": "^1.2.6" } }, + "node_modules/@floating-ui/react-dom": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.0.tgz", + "integrity": "sha512-Ke0oU3SeuABC2C4OFu2mSAwHIP5WUiV98O9YWoHV4Q5aT6E9k06DV0Khi5uYspR8xmmBk08t8ZDcz3TR3ARkEg==", + "dependencies": { + "@floating-ui/dom": "^1.2.7" + }, + "peerDependencies": { + "react": ">=16.8.0", + 
"react-dom": ">=16.8.0" + } + }, "node_modules/@headlessui/react": { "version": "1.7.10", "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.10.tgz", @@ -1274,6 +1292,407 @@ "url": "https://opencollective.com/popperjs" } }, + "node_modules/@radix-ui/primitive": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz", + "integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.0.3.tgz", + "integrity": "sha512-wSP+pHsB/jQRaL6voubsQ/ZlrGBHHrOjmBnr19hxYgtS0WvAFwZhK2WP/YY5yF9uKECCEEDGxuLxq1NBK51wFA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz", + "integrity": "sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz", + "integrity": 
"sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.4.tgz", + "integrity": "sha512-7UpBa/RKMoHJYjie1gkF1DlK8l1fdU/VKDpoS3rCCo8YBJR294GwcEHyxHw72yvphJ7ld0AXEcSLAzY2F/WyCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-escape-keydown": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz", + "integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.2.tgz", + "integrity": "sha512-1CnGGfFi/bbqtJZZ0P/NQY20xdG3E0LALJaLUEoKwPLwl6PPPfbeiCqMVQnhoFRAxjJj4RpBRJzDmUgsex2tSg==", + 
"dependencies": { + "@babel/runtime": "^7.13.10", + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-use-rect": "1.0.1", + "@radix-ui/react-use-size": "1.0.1", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.3.tgz", + "integrity": "sha512-xLYZeHrWoPmA5mEKEfZZevoVRK/Q43GfzRXkWV6qawIWWK8t6ifIiLQdd7rmQ4Vk1bmI21XhqF9BN3jWf+phpA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz", + "integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { 
+ "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz", + "integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz", + "integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.6.tgz", + "integrity": "sha512-DmNFOiwEc2UDigsYj6clJENma58OelxD24O4IODoZ+3sQc3Zb+L8w1EP+y9laTuKCLAysPw4fD6/v0j4KNV8rg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.2", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", 
+ "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-visually-hidden": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz", + "integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz", + "integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz", + "integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" 
+ }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz", + "integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.0.1.tgz", + "integrity": "sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.0.1.tgz", + "integrity": "sha512-ibay+VqrgcaI6veAojjofPATwledXiSmX+C0KrBk/xgpX9rBzPV3OsfwlhQdUOFbh+LKQorLYT+xTXW9V8yd0g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.0.3.tgz", + "integrity": "sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==", + 
"dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.0.1.tgz", + "integrity": "sha512-fyrgCaedtvMg9NK3en0pnOYJdtfwxUcNolezkNPUsoX57X8oQk+NkqcvzHXD2uKNij6GXmWU9NDru2IWjrO4BQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, "node_modules/@reactflow/background": { "version": "11.1.7", "resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.1.7.tgz", @@ -2422,7 +2841,7 @@ "version": "18.2.4", "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.4.tgz", "integrity": "sha512-G2mHoTMTL4yoydITgOGwWdWMVd8sNgyEP85xVmMKAPUBwQWm9wBPQUmvbeF4V3WBY1P7mmL4BkjQ0SqUpf1snw==", - "dev": true, + "devOptional": true, "dependencies": { "@types/react": "*" } @@ -2947,6 +3366,25 @@ "node": ">= 6" } }, + "node_modules/class-variance-authority": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.6.0.tgz", + "integrity": "sha512-qdRDgfjx3GRb9fpwpSvn+YaidnT7IUJNe4wt5/SWwM+PmUwJUhQRk/8zAyNro0PmVfmen2635UboTjIBXXxy5A==", + "dependencies": { + "clsx": "1.2.1" + }, + "funding": { + "url": "https://joebell.co.uk" + }, + "peerDependencies": { + "typescript": ">= 4.5.5 < 6" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, "node_modules/classcat": { "version": "5.0.4", "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz", @@ -4728,6 +5166,14 @@ "yallist": "^3.0.2" } }, + "node_modules/lucide-react": { + "version": "0.233.0", + "resolved": 
"https://registry.npmjs.org/lucide-react/-/lucide-react-0.233.0.tgz", + "integrity": "sha512-r0jMHF0vPDq2wBbZ0B3rtIcBjDyWDKpHu+vAjD2OHn2WLUr3HN5IHovtO0EMgQXuSI7YrMZbjsEZWC2uBHr8nQ==", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0" + } + }, "node_modules/lz-string": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.4.4.tgz", @@ -7001,6 +7447,15 @@ "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==" }, + "node_modules/tailwind-merge": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-1.13.0.tgz", + "integrity": "sha512-mUTmDbcU+IhOvJ0c42eLQ/nRkvolTqfpVaVQRSxfJAv9TabS6Y2zW/1wKpKLdKzyL3Gh8j6NTLl6MWNmvOM6kA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, "node_modules/tailwindcss": { "version": "3.3.2", "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.2.tgz", @@ -7038,6 +7493,14 @@ "node": ">=14.0.0" } }, + "node_modules/tailwindcss-animate": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tailwindcss-animate/-/tailwindcss-animate-1.0.5.tgz", + "integrity": "sha512-UU3qrOJ4lFQABY+MVADmBm+0KW3xZyhMdRvejwtXqYOL7YjHYxmuREFAZdmVG5LPe5E9CAst846SLC4j5I3dcw==", + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders" + } + }, "node_modules/terser": { "version": "5.16.3", "resolved": "https://registry.npmjs.org/terser/-/terser-5.16.3.tgz", @@ -7156,7 +7619,7 @@ "version": "5.0.4", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==", - "dev": true, + "devOptional": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" diff --git a/src/frontend/package.json b/src/frontend/package.json index bd846ce19..c7af46608 
100644 --- a/src/frontend/package.json +++ b/src/frontend/package.json @@ -8,6 +8,7 @@ "@headlessui/react": "^1.7.10", "@heroicons/react": "^2.0.15", "@mui/material": "^5.11.9", + "@radix-ui/react-tooltip": "^1.0.6", "@tabler/icons-react": "^2.18.0", "@tailwindcss/forms": "^0.5.3", "@tailwindcss/line-clamp": "^0.4.4", @@ -15,7 +16,10 @@ "ansi-to-html": "^0.7.2", "axios": "^1.3.2", "base64-js": "^1.5.1", + "class-variance-authority": "^0.6.0", + "clsx": "^1.2.1", "lodash": "^4.17.21", + "lucide-react": "^0.233.0", "react": "^18.2.0", "react-ace": "^10.1.0", "react-cookie": "^4.1.1", @@ -32,6 +36,8 @@ "rehype-mathjax": "^4.0.2", "remark-gfm": "^3.0.1", "remark-math": "^5.1.1", + "tailwind-merge": "^1.13.0", + "tailwindcss-animate": "^1.0.5", "uuid": "^9.0.0", "vite-plugin-svgr": "^3.2.0", "web-vitals": "^2.1.4" diff --git a/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx index 653248763..dc77c2877 100644 --- a/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/components/parameterComponent/index.tsx @@ -1,6 +1,11 @@ import { Handle, Position, useUpdateNodeInternals } from "reactflow"; import Tooltip from "../../../../components/TooltipComponent"; -import { classNames, isValidConnection } from "../../../../utils"; +import { + classNames, + groupByFamily, + isValidConnection, + toFirstUpperCase, +} from "../../../../utils"; import { useContext, useEffect, useRef, useState } from "react"; import InputComponent from "../../../../components/inputComponent"; import ToggleComponent from "../../../../components/toggleComponent"; @@ -15,6 +20,10 @@ import InputFileComponent from "../../../../components/inputFileComponent"; import { TabsContext } from "../../../../contexts/tabsContext"; import IntComponent from "../../../../components/intComponent"; import PromptAreaComponent from 
"../../../../components/promptComponent"; +import { nodeNames, nodeIcons } from "../../../../utils"; +import React from "react"; +import { nodeColors } from "../../../../utils"; +import ShadTooltip from "../../../../components/ShadTooltipComponent"; export default function ParameterComponent({ left, @@ -28,6 +37,7 @@ export default function ParameterComponent({ required = false, }: ParameterComponentType) { const ref = useRef(null); + const refHtml = useRef(null); const updateNodeInternals = useUpdateNodeInternals(); const [position, setPosition] = useState(0); useEffect(() => { @@ -48,6 +58,48 @@ export default function ParameterComponent({ let disabled = reactFlowInstance?.getEdges().some((e) => e.targetHandle === id) ?? false; const { save } = useContext(TabsContext); + const [myData, setMyData] = useState(useContext(typesContext).data); + + useEffect(() => { + const groupedObj = groupByFamily(myData, tooltipTitle); + + refHtml.current = groupedObj.map((item, i) => ( + 0 ? "items-center flex mt-3" : "items-center flex" + )} + > +
+ {React.createElement(nodeIcons[item.family])} +
+ + {nodeNames[item.family] ?? ""}{" "} + + {" "} + -  + {item.type.split(", ").length > 2 + ? item.type.split(", ").map((el, i) => ( + <> + + {i == item.type.split(", ").length - 1 + ? el + : (el += `, `)} + + {i % 2 == 0 && i > 0 &&

} + + )) + : item.type} +
+
+
+ )); + }, [tooltipTitle]); return (
) : ( - + - + )} {left === true && diff --git a/src/frontend/src/CustomNodes/GenericNode/index.tsx b/src/frontend/src/CustomNodes/GenericNode/index.tsx index 1a7f93ae5..79a241160 100644 --- a/src/frontend/src/CustomNodes/GenericNode/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/index.tsx @@ -28,8 +28,8 @@ import NodeModal from "../../modals/NodeModal"; import { useCallback } from "react"; import { TabsContext } from "../../contexts/tabsContext"; import { debounce } from "../../utils"; -import TooltipReact from "../../components/ReactTooltipComponent"; import Tooltip from "../../components/TooltipComponent"; +import ShadTooltip from "../../components/ShadTooltipComponent"; export default function GenericNode({ data, selected, @@ -115,14 +115,9 @@ export default function GenericNode({ }} />
- +
{data.type}
-
+
@@ -253,11 +248,7 @@ export default function GenericNode({ : toTitleCase(t) } name={t} - tooltipTitle={ - "Type: " + - data.node.template[t].type + - (data.node.template[t].list ? " list" : "") - } + tooltipTitle={data.node.template[t].type} required={data.node.template[t].required} id={data.node.template[t].type + "|" + t + "|" + data.id} left={true} @@ -283,7 +274,7 @@ export default function GenericNode({ data={data} color={nodeColors[types[data.type]] ?? nodeColors.unknown} title={data.type} - tooltipTitle={`Type: ${data.node.base_classes.join(" | ")}`} + tooltipTitle={`${data.node.base_classes.join("\n")}`} id={[data.type, data.id, ...data.node.base_classes].join("|")} type={data.node.base_classes.join("|")} left={false} diff --git a/src/frontend/src/components/ReactTooltipComponent/index.tsx b/src/frontend/src/components/ReactTooltipComponent/index.tsx index aa736c212..cb2a54f7c 100644 --- a/src/frontend/src/components/ReactTooltipComponent/index.tsx +++ b/src/frontend/src/components/ReactTooltipComponent/index.tsx @@ -37,13 +37,15 @@ const TooltipReact: FC = ({ id={selector} content={content} className={classNames( - "!bg-white !text-xs !font-normal !text-gray-700 !shadow-md !opacity-100 z-20", + "!bg-white !text-xs !font-normal !text-gray-700 !shadow-md !opacity-100 z-[9999]", className )} place={position} clickable={clickable} isOpen={disabled ? 
false : undefined} delayShow={delayShow} + positionStrategy="absolute" + float={true} > {htmlContent && htmlContent} diff --git a/src/frontend/src/components/ShadTooltipComponent/index.tsx b/src/frontend/src/components/ShadTooltipComponent/index.tsx new file mode 100644 index 000000000..a360f3ff0 --- /dev/null +++ b/src/frontend/src/components/ShadTooltipComponent/index.tsx @@ -0,0 +1,25 @@ +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "../ui/tooltip"; + +const ShadTooltip = (props) => { + return ( + + + {props.children} + + {props.content} + + + + ); +}; + +export default ShadTooltip; diff --git a/src/frontend/src/components/dropdownComponent/index.tsx b/src/frontend/src/components/dropdownComponent/index.tsx index eebbe1554..0a214eb4e 100644 --- a/src/frontend/src/components/dropdownComponent/index.tsx +++ b/src/frontend/src/components/dropdownComponent/index.tsx @@ -25,7 +25,9 @@ export default function Dropdown({ <>
- {internalValue} + + {internalValue} + , + React.ComponentPropsWithoutRef +>(({ className, sideOffset = 4, ...props }, ref) => ( + +)); +TooltipContent.displayName = TooltipPrimitive.Content.displayName; + +export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }; diff --git a/src/frontend/src/contexts/darkContext.tsx b/src/frontend/src/contexts/darkContext.tsx index f612fe381..2a76d0e53 100644 --- a/src/frontend/src/contexts/darkContext.tsx +++ b/src/frontend/src/contexts/darkContext.tsx @@ -13,13 +13,16 @@ const initialValue = { export const darkContext = createContext(initialValue); export function DarkProvider({ children }) { - const [dark, setDark] = useState(false); + const [dark, setDark] = useState( + JSON.parse(window.localStorage.getItem("isDark")) ?? false + ); useEffect(() => { if (dark) { document.getElementById("body").classList.add("dark"); } else { document.getElementById("body").classList.remove("dark"); } + window.localStorage.setItem("isDark", dark.toString()); }, [dark]); return ( ( ); export function TabsProvider({ children }: { children: ReactNode }) { - const { setNoticeData } = useContext(alertContext); + const { setErrorData, setNoticeData } = useContext(alertContext); const [tabIndex, setTabIndex] = useState(0); const [flows, setFlows] = useState>([]); const [id, setId] = useState(uuidv4()); @@ -95,25 +95,25 @@ export function TabsProvider({ children }: { children: ReactNode }) { edge.style = { stroke: "#555555" }; }); flow.data.nodes.forEach((node) => { - if (Object.keys(templates[node.data.type]["template"]).length > 0) { - node.data.node.base_classes = - templates[node.data.type]["base_classes"]; + const template = templates[node.data.type]; + if (!template) { + setErrorData({ title: `Unknown node type: ${node.data.type}` }); + return; + } + if (Object.keys(template["template"]).length > 0) { + node.data.node.base_classes = template["base_classes"]; flow.data.edges.forEach((edge) => { if (edge.source === node.id) { 
edge.sourceHandle = edge.sourceHandle .split("|") .slice(0, 2) - .concat(templates[node.data.type]["base_classes"]) + .concat(template["base_classes"]) .join("|"); } }); - node.data.node.description = - templates[node.data.type]["description"]; + node.data.node.description = template["description"]; node.data.node.template = updateTemplate( - templates[node.data.type][ - "template" - ] as unknown as APITemplateType, - + template["template"] as unknown as APITemplateType, node.data.node.template as APITemplateType ); } @@ -316,21 +316,25 @@ export function TabsProvider({ children }: { children: ReactNode }) { edge.animated = edge.targetHandle.split("|")[0] === "Text"; }); data.nodes.forEach((node) => { - if (Object.keys(templates[node.data.type]["template"]).length > 0) { - node.data.node.base_classes = - templates[node.data.type]["base_classes"]; + const template = templates[node.data.type]; + if (!template) { + setErrorData({ title: `Unknown node type: ${node.data.type}` }); + return; + } + if (Object.keys(template["template"]).length > 0) { + node.data.node.base_classes = template["base_classes"]; flow.data.edges.forEach((edge) => { if (edge.source === node.id) { edge.sourceHandle = edge.sourceHandle .split("|") .slice(0, 2) - .concat(templates[node.data.type]["base_classes"]) + .concat(template["base_classes"]) .join("|"); } }); - node.data.node.description = templates[node.data.type]["description"]; + node.data.node.description = template["description"]; node.data.node.template = updateTemplate( - templates[node.data.type]["template"] as unknown as APITemplateType, + template["template"] as unknown as APITemplateType, node.data.node.template as APITemplateType ); } diff --git a/src/frontend/src/controllers/API/index.ts b/src/frontend/src/controllers/API/index.ts index f6f46404b..0cffd04bf 100644 --- a/src/frontend/src/controllers/API/index.ts +++ b/src/frontend/src/controllers/API/index.ts @@ -14,13 +14,13 @@ export async function sendAll(data: sendAllProps) { 
export async function checkCode( code: string ): Promise> { - return await axios.post("/validate/code", { code }); + return await axios.post("api/v1/validate/code", { code }); } export async function checkPrompt( template: string ): Promise> { - return await axios.post("/validate/prompt", { template }); + return await axios.post("api/v1/validate/prompt", { template }); } export async function getExamples(): Promise { diff --git a/src/frontend/src/icons/Anthropic/anthropic.svg b/src/frontend/src/icons/Anthropic/anthropic.svg new file mode 100644 index 000000000..67ae02ea5 --- /dev/null +++ b/src/frontend/src/icons/Anthropic/anthropic.svg @@ -0,0 +1,9 @@ + + + + + + + \ No newline at end of file diff --git a/src/frontend/src/icons/Anthropic/anthropic_box.svg b/src/frontend/src/icons/Anthropic/anthropic_box.svg new file mode 100644 index 000000000..fa9923ed7 --- /dev/null +++ b/src/frontend/src/icons/Anthropic/anthropic_box.svg @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/src/frontend/src/icons/Anthropic/index.tsx b/src/frontend/src/icons/Anthropic/index.tsx new file mode 100644 index 000000000..4cdf8f910 --- /dev/null +++ b/src/frontend/src/icons/Anthropic/index.tsx @@ -0,0 +1,9 @@ +import React, { forwardRef } from "react"; +import { ReactComponent as AnthropicSVG } from "./anthropic_box.svg"; + +export const AnthropicIcon = forwardRef< + SVGSVGElement, + React.PropsWithChildren<{}> +>((props, ref) => { + return ; +}); diff --git a/src/frontend/src/index.css b/src/frontend/src/index.css index 67f44ca1c..14580c658 100644 --- a/src/frontend/src/index.css +++ b/src/frontend/src/index.css @@ -2,22 +2,88 @@ @tailwind components; @tailwind utilities; +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 222.2 47.4% 11.2%; + --muted: 210 40% 96.1%; + --muted-foreground: 215.4 16.3% 46.9%; + --popover: 0 0% 100%; + --popover-foreground: 222.2 47.4% 11.2%; + --card: 0 0% 100%; + --card-foreground: 222.2 47.4% 11.2%; + --border: 
214.3 31.8% 91.4%; + --input: 214.3 31.8% 91.4%; + --primary: 222.2 47.4% 11.2%; + --primary-foreground: 210 40% 98%; + --secondary: 210 40% 96.1%; + --secondary-foreground: 222.2 47.4% 11.2%; + --accent: 210 40% 96.1%; + --accent-foreground: 222.2 47.4% 11.2%; + --destructive: 0 100% 50%; + --destructive-foreground: 210 40% 98%; + --ring: 215 20.2% 65.1%; + --radius: 0.5rem; + } + + .dark { + -background: 224 71% 4%; + -foreground: 213 31% 91%; + -muted: 223 47% 11%; + -muted-foreground: 215.4 16.3% 56.9%; + -popover: 224 71% 4%; + -popover-foreground: 215 20.2% 65.1%; + -card: 224 71% 4%; + -card-foreground: 213 31% 91%; + -border: 216 34% 17%; + -input: 216 34% 17%; + -primary: 210 40% 98%; + -primary-foreground: 222.2 47.4% 1.2%; + -secondary: 222.2 47.4% 11.2%; + -secondary-foreground: 210 40% 98%; + -accent: 216 34% 17%; + -accent-foreground: 210 40% 98%; + -destructive: 0 63% 31%; + -destructive-foreground: 210 40% 98%; + -ring: 216 34% 17%; + -radius: 0.5rem; + } +} + + @layer base { + * { + @apply border-border; + } + body { + @apply bg-background text-foreground; + font-feature-settings: "rlig" 1, "calt" 1; + } + } + +@layer base { + * { + @apply border-border; + } + body { + @apply bg-background text-foreground; + font-feature-settings: "rlig" 1, "calt" 1; + } +} + body { - margin: 0; - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", - "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", - sans-serif; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; + margin: 0; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", + "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", + sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; } code { - font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", - monospace; + font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", + monospace; } 
-/* The style below sets the cursor property of the element with the class .react-flow__pane to the default cursor. -The cursor: default; property value restores the browser's default cursor style for the targeted element. By applying this style, the element will no longer have a custom cursor appearance such as "grab" or any other custom cursor defined elsewhere in the application. Instead, it will revert to the default cursor style determined by the browser, typically an arrow-shaped cursor. */ .react-flow__pane { cursor: default; } diff --git a/src/frontend/src/modals/chatModal/index.tsx b/src/frontend/src/modals/chatModal/index.tsx index cf2b52aac..39bb72994 100644 --- a/src/frontend/src/modals/chatModal/index.tsx +++ b/src/frontend/src/modals/chatModal/index.tsx @@ -182,10 +182,10 @@ export default function ChatModal({ try { const urlWs = process.env.NODE_ENV === "development" - ? `ws://localhost:7860/chat/${id.current}` + ? `ws://localhost:7860/api/v1/chat/${id.current}` : `${window.location.protocol === "https:" ? "wss" : "ws"}://${ window.location.host - }/chat/${id.current}`; + }api/v1/chat/${id.current}`; const newWs = new WebSocket(urlWs); newWs.onopen = () => { console.log("WebSocket connection established!"); diff --git a/src/frontend/src/pages/FlowPage/components/DisclosureComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/DisclosureComponent/index.tsx index 31bf78156..6878ad3c4 100644 --- a/src/frontend/src/pages/FlowPage/components/DisclosureComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/DisclosureComponent/index.tsx @@ -5,6 +5,7 @@ import { DisclosureComponentType } from "../../../../types/components"; export default function DisclosureComponent({ button: { title, Icon, buttons = [] }, children, + openDisc, }: DisclosureComponentType) { return ( @@ -27,14 +28,14 @@ export default function DisclosureComponent({
- + {children} diff --git a/src/frontend/src/pages/FlowPage/components/extraSidebarComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/extraSidebarComponent/index.tsx index e8dbfbb6b..f0fe333d5 100644 --- a/src/frontend/src/pages/FlowPage/components/extraSidebarComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/extraSidebarComponent/index.tsx @@ -9,12 +9,13 @@ import { import { useContext, useEffect, useState, useRef } from "react"; import { typesContext } from "../../../../contexts/typesContext"; import { APIClassType, APIObjectType } from "../../../../types/api"; -import TooltipReact from "../../../../components/ReactTooltipComponent"; import { MagnifyingGlassIcon } from "@heroicons/react/24/outline"; +import ShadTooltip from "../../../../components/ShadTooltipComponent"; export default function ExtraSidebar() { const { data } = useContext(typesContext); const [dataFilter, setFilterData] = useState(data); + const [search, setSearch] = useState(""); function onDragStart( event: React.DragEvent, @@ -58,6 +59,7 @@ export default function ExtraSidebar() { className="dark:text-white focus:outline-none block w-full rounded-md py-1.5 ps-3 pr-9 text-gray-900 shadow-sm ring-1 ring-inset ring-gray-300 placeholder:text-gray-400 sm:text-sm sm:leading-6 dark:ring-0 dark:bg-[#2d3747] dark:focus:outline-none" onChange={(e) => { handleSearchInput(e.target.value); + setSearch(e.target.value); }} />
@@ -71,6 +73,7 @@ export default function ExtraSidebar() { .map((d: keyof APIObjectType, i) => Object.keys(dataFilter[d]).length > 0 ? ( ( -
-
+ ))}
diff --git a/src/frontend/src/pages/FlowPage/components/tabsManagerComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/tabsManagerComponent/index.tsx index 278fe6756..816247911 100644 --- a/src/frontend/src/pages/FlowPage/components/tabsManagerComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/tabsManagerComponent/index.tsx @@ -116,7 +116,7 @@ export default function TabsManagerComponent() { -
+
{flows[tabIndex] ? ( diff --git a/src/frontend/src/types/components/index.ts b/src/frontend/src/types/components/index.ts index e7559023a..889427a3c 100644 --- a/src/frontend/src/types/components/index.ts +++ b/src/frontend/src/types/components/index.ts @@ -3,6 +3,7 @@ import { ReactElement, ReactFragment, ReactNode, + SVGProps, } from "react"; import { NodeDataType } from "../flow/index"; export type InputComponentType = { @@ -56,6 +57,7 @@ export type FileComponentType = { export type DisclosureComponentType = { children: ReactNode; + openDisc: boolean; button: { title: string; Icon: ForwardRefExoticComponent>; diff --git a/src/frontend/src/utils.ts b/src/frontend/src/utils.ts index 559080f3f..cc8615d9d 100644 --- a/src/frontend/src/utils.ts +++ b/src/frontend/src/utils.ts @@ -21,6 +21,7 @@ import { FlowType, NodeType } from "./types/flow"; import { APITemplateType, TemplateVariableType } from "./types/api"; import _ from "lodash"; import { ChromaIcon } from "./icons/ChromaIcon"; +import { AnthropicIcon } from "./icons/Anthropic"; import { AirbyteIcon } from "./icons/Airbyte"; import { AzIcon } from "./icons/AzLogo"; import { BingIcon } from "./icons/Bing"; @@ -47,6 +48,12 @@ import { WolframIcon } from "./icons/Wolfram"; import { WordIcon } from "./icons/Word"; import { SerperIcon } from "./icons/Serper"; import { v4 as uuidv4 } from "uuid"; +import { clsx, type ClassValue } from "clsx"; +import { twMerge } from "tailwind-merge"; + +export function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} export function classNames(...classes: Array) { return classes.filter(Boolean).join(" "); @@ -153,6 +160,8 @@ export const nodeIcons: { AirbyteJSONLoader: AirbyteIcon, // SerpAPIWrapper: SerperIcon, // AZLyricsLoader: AzIcon, + Anthropic: AnthropicIcon, + ChatAnthropic: AnthropicIcon, BingSearchAPIWrapper: BingIcon, BingSearchRun: BingIcon, Cohere: CohereIcon, @@ -632,3 +641,58 @@ export function updateIds(newFlow, getNodeId) { e.targetHandle; }); } + 
+export function groupByFamily(data, baseClasses) { + let arrOfParent: string[] = []; + let arrOfType: { family: string; type: string }[] = []; + + Object.keys(data).map((d) => { + Object.keys(data[d]).map((n) => { + if ( + data[d][n].base_classes.some((r) => baseClasses.split("\n").includes(r)) + ) { + arrOfParent.push(d); + } + }); + }); + + let uniq = arrOfParent.filter( + (item, index) => arrOfParent.indexOf(item) === index + ); + + Object.keys(data).map((d) => { + Object.keys(data[d]).map((n) => { + baseClasses.split("\n").forEach((tol) => { + data[d][n].base_classes.forEach((data) => { + if (tol == data) { + arrOfType.push({ + family: d, + type: data, + }); + } + }); + }); + }); + }); + + let groupedBy = arrOfType.filter((object, index, self) => { + const foundIndex = self.findIndex( + (o) => o.family === object.family && o.type === object.type + ); + return foundIndex === index; + }); + + let groupedObj = groupedBy.reduce((result, item) => { + const existingGroup = result.find((group) => group.family === item.family); + + if (existingGroup) { + existingGroup.type += `, ${item.type}`; + } else { + result.push({ family: item.family, type: item.type }); + } + + return result; + }, []); + + return groupedObj; +} diff --git a/src/frontend/tailwind.config.js b/src/frontend/tailwind.config.js index 8df8b8c1e..5644a21bc 100644 --- a/src/frontend/tailwind.config.js +++ b/src/frontend/tailwind.config.js @@ -1,11 +1,83 @@ /** @type {import('tailwindcss').Config} */ +const { fontFamily } = require("tailwindcss/defaultTheme") + import plugin from "tailwindcss/plugin"; module.exports = { content: ["./index.html", "./src/**/*.{js,ts,tsx,jsx}"], darkMode: "class", important: true, theme: { + container: { + center: true, + padding: "2rem", + screens: { + "2xl": "1400px", + }, + }, extend: { + colors: { + border: "hsl(var(--border))", + input: "hsl(var(--input))", + ring: "hsl(var(--ring))", + background: "hsl(var(--background))", + foreground: "hsl(var(--foreground))", + 
primary: { + DEFAULT: "hsl(var(--primary))", + foreground: "hsl(var(--primary-foreground))", + }, + secondary: { + DEFAULT: "hsl(var(--secondary))", + foreground: "hsl(var(--secondary-foreground))", + }, + destructive: { + DEFAULT: "hsl(var(--destructive))", + foreground: "hsl(var(--destructive-foreground))", + }, + muted: { + DEFAULT: "hsl(var(--muted))", + foreground: "hsl(var(--muted-foreground))", + }, + accent: { + DEFAULT: "hsl(var(--accent))", + foreground: "hsl(var(--accent-foreground))", + }, + popover: { + DEFAULT: "hsl(var(--popover))", + foreground: "hsl(var(--popover-foreground))", + }, + card: { + DEFAULT: "hsl(var(--card))", + foreground: "hsl(var(--card-foreground))", + }, + }, + borderRadius: { + lg: `var(--radius)`, + md: `calc(var(--radius) - 2px)`, + sm: "calc(var(--radius) - 4px)", + }, + fontFamily: { + sans: ["var(--font-sans)", ...fontFamily.sans], + }, + keyframes: { + "accordion-down": { + from: { height: 0 }, + to: { height: "var(--radix-accordion-content-height)" }, + }, + "accordion-up": { + from: { height: "var(--radix-accordion-content-height)" }, + to: { height: 0 }, + }, + pulseGreen: { + "0%": { boxShadow: "0 0 0 0 rgba(72, 187, 120, 0.7)" }, + "100%": { boxShadow: "0 0 0 10px rgba(72, 187, 120, 0)" }, + }, + }, + animation: { + "accordion-down": "accordion-down 0.2s ease-out", + "accordion-up": "accordion-up 0.2s ease-out", + "pulse-green": "pulseGreen 1s linear", + 'spin-once': 'spin 1s linear 0.7' + }, borderColor: { "red-outline": "rgba(255, 0, 0, 0.8)", "green-outline": "rgba(72, 187, 120, 0.7)", @@ -14,17 +86,6 @@ module.exports = { "red-outline": "0 0 5px rgba(255, 0, 0, 0.5)", "green-outline": "0 0 5px rgba(72, 187, 120, 0.7)", }, - - animation: { - "pulse-green": "pulseGreen 1s linear", - 'spin-once': 'spin 1s linear 0.7' - }, - keyframes: { - pulseGreen: { - "0%": { boxShadow: "0 0 0 0 rgba(72, 187, 120, 0.7)" }, - "100%": { boxShadow: "0 0 0 10px rgba(72, 187, 120, 0)" }, - }, - }, }, }, plugins: [ @@ -96,4 +157,4 @@ 
module.exports = { }), require("@tailwindcss/typography"), ], -}; +}; \ No newline at end of file diff --git a/src/frontend/tsconfig.json b/src/frontend/tsconfig.json index 3a05105de..2ed005aff 100644 --- a/src/frontend/tsconfig.json +++ b/src/frontend/tsconfig.json @@ -19,7 +19,8 @@ "isolatedModules": true, "noEmit": true, "jsx": "react-jsx", - "noImplicitAny": false + "noImplicitAny": false, + "baseUrl": "." }, "include": [ "src" diff --git a/src/frontend/vite.config.ts b/src/frontend/vite.config.ts index 172b37733..d4fa2248b 100644 --- a/src/frontend/vite.config.ts +++ b/src/frontend/vite.config.ts @@ -11,7 +11,7 @@ const apiRoutes = [ ]; // Use environment variable to determine the target. -const target = process.env.VITE_PROXY_TARGET || "http://127.0.0.1:7860"; +const target = process.env.VITE_PROXY_TARGET || "http://127.0.0.1:7860/api/v1"; const proxyTargets = apiRoutes.reduce((proxyObj, route) => { proxyObj[route] = { diff --git a/tests/data/complex_example.json b/tests/data/complex_example.json index 8d46a97d6..89a3b9324 100644 --- a/tests/data/complex_example.json +++ b/tests/data/complex_example.json @@ -197,7 +197,7 @@ "y": 136.29836646158452 }, "data": { - "type": "PythonFunction", + "type": "PythonFunctionTool", "node": { "template": { "code": { @@ -210,6 +210,26 @@ "type": "str", "list": false }, + "description": { + "required": true, + "placeholder": "", + "show": true, + "multiline": true, + "value": "My description", + "name": "description", + "type": "str", + "list": false + }, + "name": { + "required": true, + "placeholder": "", + "show": true, + "multiline": true, + "value": "My Tool", + "name": "name", + "type": "str", + "list": false + }, "_type": "python_function" }, "description": "Python function to be executed.", diff --git a/tests/test_agents_template.py b/tests/test_agents_template.py index 7aa8de176..8e181711f 100644 --- a/tests/test_agents_template.py +++ b/tests/test_agents_template.py @@ -5,7 +5,7 @@ from langflow.settings import 
settings # check that all agents are in settings.agents # are in json_response["agents"] def test_agents_settings(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() agents = json_response["agents"] @@ -13,7 +13,7 @@ def test_agents_settings(client: TestClient): def test_zero_shot_agent(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() agents = json_response["agents"] @@ -52,7 +52,7 @@ def test_zero_shot_agent(client: TestClient): def test_json_agent(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() agents = json_response["agents"] @@ -87,7 +87,7 @@ def test_json_agent(client: TestClient): def test_csv_agent(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() agents = json_response["agents"] @@ -126,7 +126,7 @@ def test_csv_agent(client: TestClient): def test_initialize_agent(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() agents = json_response["agents"] diff --git a/tests/test_cache.py b/tests/test_cache.py index 3d3e951fc..3214e7d15 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -1,10 +1,10 @@ import json +from langflow.graph import Graph +from langflow.processing.process import load_or_build_langchain_object import pytest from langflow.interface.run import ( - build_graph, build_langchain_object_with_caching, - load_or_build_langchain_object, ) @@ -62,7 +62,7 @@ def test_build_langchain_object_with_caching(basic_data_graph): # Test build_graph def test_build_graph(basic_data_graph): - graph = 
build_graph(basic_data_graph) + graph = Graph.from_payload(basic_data_graph) assert graph is not None assert len(graph.nodes) == len(basic_data_graph["nodes"]) assert len(graph.edges) == len(basic_data_graph["edges"]) diff --git a/tests/test_chains_template.py b/tests/test_chains_template.py index c958cf64d..0c7af56ad 100644 --- a/tests/test_chains_template.py +++ b/tests/test_chains_template.py @@ -3,7 +3,7 @@ from langflow.settings import settings def test_chains_settings(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] @@ -12,7 +12,7 @@ def test_chains_settings(client: TestClient): # Test the ConversationChain object def test_conversation_chain(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] @@ -94,7 +94,7 @@ def test_conversation_chain(client: TestClient): def test_llm_chain(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] @@ -152,7 +152,7 @@ def test_llm_chain(client: TestClient): def test_llm_checker_chain(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] @@ -228,7 +228,7 @@ def test_llm_checker_chain(client: TestClient): def test_llm_math_chain(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] @@ -306,7 +306,7 @@ def test_llm_math_chain(client: TestClient): def test_series_character_chain(client: TestClient): - response = client.get("/all") + response = 
client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] @@ -368,7 +368,7 @@ def test_series_character_chain(client: TestClient): def test_mid_journey_prompt_chain(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] @@ -407,7 +407,7 @@ def test_mid_journey_prompt_chain(client: TestClient): def test_time_travel_guide_chain(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() chains = json_response["chains"] diff --git a/tests/test_custom_types.py b/tests/test_custom_types.py index 399450e2e..7503426ab 100644 --- a/tests/test_custom_types.py +++ b/tests/test_custom_types.py @@ -1,16 +1,23 @@ # Test this: +from langflow.interface.importing.utils import get_function import pytest -from langflow.interface.tools.custom import PythonFunction +from langflow.interface.tools.custom import PythonFunctionTool from langflow.utils import constants def test_python_function(): """Test Python function""" - func = PythonFunction(code=constants.DEFAULT_PYTHON_FUNCTION) - assert func.get_function()("text") == "text" + code = constants.DEFAULT_PYTHON_FUNCTION + func = get_function(code) + func = PythonFunctionTool(name="Test", description="Testing", code=code, func=func) + assert func("text") == "text" # the tool decorator should raise an error if # the function is not str -> str # This raises ValidationError with pytest.raises(SyntaxError): - func = PythonFunction(code=pytest.CODE_WITH_SYNTAX_ERROR) + code = pytest.CODE_WITH_SYNTAX_ERROR + func = get_function(code) + func = PythonFunctionTool( + name="Test", description="Testing", code=code, func=func + ) diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py index 83f6c62b1..9e07dfb24 100644 --- 
a/tests/test_endpoints.py +++ b/tests/test_endpoints.py @@ -4,7 +4,7 @@ from langflow.interface.tools.constants import CUSTOM_TOOLS def test_get_all(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() # We need to test the custom nodes @@ -21,7 +21,7 @@ import math def square(x): return x ** 2 """ - response1 = client.post("/validate/code", json={"code": code1}) + response1 = client.post("api/v1/validate/code", json={"code": code1}) assert response1.status_code == 200 assert response1.json() == {"imports": {"errors": []}, "function": {"errors": []}} @@ -32,7 +32,7 @@ import non_existent_module def square(x): return x ** 2 """ - response2 = client.post("/validate/code", json={"code": code2}) + response2 = client.post("api/v1/validate/code", json={"code": code2}) assert response2.status_code == 200 assert response2.json() == { "imports": {"errors": ["No module named 'non_existent_module'"]}, @@ -46,7 +46,7 @@ import math def square(x) return x ** 2 """ - response3 = client.post("/validate/code", json={"code": code3}) + response3 = client.post("api/v1/validate/code", json={"code": code3}) assert response3.status_code == 200 assert response3.json() == { "imports": {"errors": []}, @@ -54,11 +54,11 @@ def square(x) } # Test case with invalid JSON payload - response4 = client.post("/validate/code", json={"invalid_key": code1}) + response4 = client.post("api/v1/validate/code", json={"invalid_key": code1}) assert response4.status_code == 422 # Test case with an empty code string - response5 = client.post("/validate/code", json={"code": ""}) + response5 = client.post("api/v1/validate/code", json={"code": ""}) assert response5.status_code == 200 assert response5.json() == {"imports": {"errors": []}, "function": {"errors": []}} @@ -69,7 +69,7 @@ import math def square(x) return x ** 2 """ - response6 = client.post("/validate/code", json={"code": code6}) + response6 = 
client.post("api/v1/validate/code", json={"code": code6}) assert response6.status_code == 200 assert response6.json() == { "imports": {"errors": []}, @@ -95,13 +95,13 @@ INVALID_PROMPT = "This is an invalid prompt without any input variable." def test_valid_prompt(client: TestClient): - response = client.post("/validate/prompt", json={"template": VALID_PROMPT}) + response = client.post("api/v1/validate/prompt", json={"template": VALID_PROMPT}) assert response.status_code == 200 assert response.json() == {"input_variables": ["product"]} def test_invalid_prompt(client: TestClient): - response = client.post("/validate/prompt", json={"template": INVALID_PROMPT}) + response = client.post("api/v1/validate/prompt", json={"template": INVALID_PROMPT}) assert response.status_code == 200 assert response.json() == {"input_variables": []} @@ -116,7 +116,7 @@ def test_invalid_prompt(client: TestClient): ], ) def test_various_prompts(client, prompt, expected_input_variables): - response = client.post("/validate/prompt", json={"template": prompt}) + response = client.post("api/v1/validate/prompt", json={"template": prompt}) assert response.status_code == 200 assert response.json() == { "input_variables": expected_input_variables, diff --git a/tests/test_graph.py b/tests/test_graph.py index cdbe0ba93..69a926cc3 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -1,22 +1,22 @@ from typing import Type, Union from langflow.graph.edge.base import Edge -from langflow.graph.node.base import Node +from langflow.graph.vertex.base import Vertex import pytest from langchain.chains.base import Chain from langchain.llms.fake import FakeListLLM from langflow.graph import Graph -from langflow.graph.node.types import ( - AgentNode, - ChainNode, - FileToolNode, - LLMNode, - PromptNode, - ToolkitNode, - ToolNode, - WrapperNode, +from langflow.graph.vertex.types import ( + AgentVertex, + ChainVertex, + FileToolVertex, + LLMVertex, + PromptVertex, + ToolkitVertex, + ToolVertex, + 
WrapperVertex, ) -from langflow.interface.run import get_result_and_thought +from langflow.processing.process import get_result_and_thought from langflow.utils.payload import get_root_node # Test cases for the graph module @@ -25,7 +25,7 @@ from langflow.utils.payload import get_root_node # BASIC_EXAMPLE_PATH, COMPLEX_EXAMPLE_PATH, OPENAPI_EXAMPLE_PATH -def get_node_by_type(graph, node_type: Type[Node]) -> Union[Node, None]: +def get_node_by_type(graph, node_type: Type[Vertex]) -> Union[Vertex, None]: """Get a node by type""" return next((node for node in graph.nodes if isinstance(node, node_type)), None) @@ -35,7 +35,7 @@ def test_graph_structure(basic_graph): assert len(basic_graph.nodes) > 0 assert len(basic_graph.edges) > 0 for node in basic_graph.nodes: - assert isinstance(node, Node) + assert isinstance(node, Vertex) for edge in basic_graph.edges: assert isinstance(edge, Edge) assert edge.source in basic_graph.nodes @@ -158,14 +158,16 @@ def test_get_node_neighbors_complex(complex_graph): tool_neighbors = complex_graph.get_nodes_with_target(tool) assert tool_neighbors is not None # Check if there is a PythonFunction in the tool's neighbors - assert any("PythonFunction" in neighbor.data["type"] for neighbor in tool_neighbors) + assert any( + "PythonFunctionTool" in neighbor.data["type"] for neighbor in tool_neighbors + ) def test_get_node(basic_graph): """Test getting a single node""" node_id = basic_graph.nodes[0].id node = basic_graph.get_node(node_id) - assert isinstance(node, Node) + assert isinstance(node, Vertex) assert node.id == node_id @@ -174,7 +176,7 @@ def test_build_nodes(basic_graph): assert len(basic_graph.nodes) == len(basic_graph._nodes) for node in basic_graph.nodes: - assert isinstance(node, Node) + assert isinstance(node, Vertex) def test_build_edges(basic_graph): @@ -182,8 +184,8 @@ def test_build_edges(basic_graph): assert len(basic_graph.edges) == len(basic_graph._edges) for edge in basic_graph.edges: assert isinstance(edge, Edge) - 
assert isinstance(edge.source, Node) - assert isinstance(edge.target, Node) + assert isinstance(edge.source, Vertex) + assert isinstance(edge.target, Vertex) def test_get_root_node(basic_graph, complex_graph): @@ -191,13 +193,13 @@ def test_get_root_node(basic_graph, complex_graph): assert isinstance(basic_graph, Graph) root = get_root_node(basic_graph) assert root is not None - assert isinstance(root, Node) + assert isinstance(root, Vertex) assert root.data["type"] == "TimeTravelGuideChain" # For complex example, the root node is a ZeroShotAgent too assert isinstance(complex_graph, Graph) root = get_root_node(complex_graph) assert root is not None - assert isinstance(root, Node) + assert isinstance(root, Vertex) assert root.data["type"] == "ZeroShotAgent" @@ -239,11 +241,10 @@ def test_build_params(basic_graph): assert "memory" in root.params -def test_build(basic_graph, complex_graph, openapi_graph): +def test_build(basic_graph, complex_graph): """Test Node's build method""" assert_agent_was_built(basic_graph) assert_agent_was_built(complex_graph) - assert_agent_was_built(openapi_graph) def assert_agent_was_built(graph): @@ -257,14 +258,14 @@ def assert_agent_was_built(graph): def test_agent_node_build(complex_graph): - agent_node = get_node_by_type(complex_graph, AgentNode) + agent_node = get_node_by_type(complex_graph, AgentVertex) assert agent_node is not None built_object = agent_node.build() assert built_object is not None def test_tool_node_build(complex_graph): - tool_node = get_node_by_type(complex_graph, ToolNode) + tool_node = get_node_by_type(complex_graph, ToolVertex) assert tool_node is not None built_object = tool_node.build() assert built_object is not None @@ -272,7 +273,7 @@ def test_tool_node_build(complex_graph): def test_chain_node_build(complex_graph): - chain_node = get_node_by_type(complex_graph, ChainNode) + chain_node = get_node_by_type(complex_graph, ChainVertex) assert chain_node is not None built_object = chain_node.build() assert 
built_object is not None @@ -280,7 +281,7 @@ def test_chain_node_build(complex_graph): def test_prompt_node_build(complex_graph): - prompt_node = get_node_by_type(complex_graph, PromptNode) + prompt_node = get_node_by_type(complex_graph, PromptVertex) assert prompt_node is not None built_object = prompt_node.build() assert built_object is not None @@ -288,7 +289,7 @@ def test_prompt_node_build(complex_graph): def test_llm_node_build(basic_graph): - llm_node = get_node_by_type(basic_graph, LLMNode) + llm_node = get_node_by_type(basic_graph, LLMVertex) assert llm_node is not None built_object = llm_node.build() assert built_object is not None @@ -296,7 +297,7 @@ def test_llm_node_build(basic_graph): def test_toolkit_node_build(openapi_graph): - toolkit_node = get_node_by_type(openapi_graph, ToolkitNode) + toolkit_node = get_node_by_type(openapi_graph, ToolkitVertex) assert toolkit_node is not None built_object = toolkit_node.build() assert built_object is not None @@ -304,7 +305,7 @@ def test_toolkit_node_build(openapi_graph): def test_file_tool_node_build(openapi_graph): - file_tool_node = get_node_by_type(openapi_graph, FileToolNode) + file_tool_node = get_node_by_type(openapi_graph, FileToolVertex) assert file_tool_node is not None built_object = file_tool_node.build() assert built_object is not None @@ -312,7 +313,7 @@ def test_file_tool_node_build(openapi_graph): def test_wrapper_node_build(openapi_graph): - wrapper_node = get_node_by_type(openapi_graph, WrapperNode) + wrapper_node = get_node_by_type(openapi_graph, WrapperVertex) assert wrapper_node is not None built_object = wrapper_node.build() assert built_object is not None @@ -327,7 +328,7 @@ def test_get_result_and_thought(basic_graph): message = "Hello" # Find the node that is an LLMNode and change the # _built_object to a FakeListLLM - llm_node = get_node_by_type(basic_graph, LLMNode) + llm_node = get_node_by_type(basic_graph, LLMVertex) assert llm_node is not None llm_node._built_object = 
FakeListLLM(responses=responses) llm_node._built = True diff --git a/tests/test_llms_template.py b/tests/test_llms_template.py index ccf2f6388..db550393e 100644 --- a/tests/test_llms_template.py +++ b/tests/test_llms_template.py @@ -3,7 +3,7 @@ from langflow.settings import settings def test_llms_settings(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() llms = json_response["llms"] @@ -11,7 +11,7 @@ def test_llms_settings(client: TestClient): # def test_hugging_face_hub(client: TestClient): -# response = client.get("/all") +# response = client.get("api/v1/all") # assert response.status_code == 200 # json_response = response.json() # language_models = json_response["llms"] @@ -103,7 +103,7 @@ def test_llms_settings(client: TestClient): def test_openai(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() language_models = json_response["llms"] @@ -333,7 +333,7 @@ def test_openai(client: TestClient): def test_chat_open_ai(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() language_models = json_response["llms"] @@ -482,3 +482,77 @@ def test_chat_open_ai(client: TestClient): "ChatOpenAI", "BaseLanguageModel", } + + +def test_azure_open_ai(client: TestClient): + response = client.get("/all") + assert response.status_code == 200 + json_response = response.json() + language_models = json_response["llms"] + + model = language_models["AzureOpenAI"] + template = model["template"] + + assert template["model_name"].show is False + assert template["deployment_name"] == { + "required": False, + "placeholder": "", + "show": True, + "multiline": False, + "value": "", + "password": False, + "name": "deployment_name", + "advanced": False, + "type": "str", + "list": 
False, + } + + +def test_azure_chat_open_ai(client: TestClient): + response = client.get("/all") + assert response.status_code == 200 + json_response = response.json() + language_models = json_response["llms"] + + model = language_models["AzureChatOpenAI"] + template = model["template"] + + assert template["model_name"].show is False + assert template["deployment_name"] == { + "required": False, + "placeholder": "", + "show": True, + "multiline": False, + "value": "", + "password": False, + "name": "deployment_name", + "advanced": False, + "type": "str", + "list": False, + } + assert template["openai_api_type"] == { + "required": False, + "placeholder": "", + "show": False, + "multiline": False, + "value": "azure", + "password": False, + "name": "openai_api_type", + "display_name": "OpenAI API Type", + "advanced": False, + "type": "str", + "list": False, + } + assert template["openai_api_version"] == { + "required": False, + "placeholder": "", + "show": True, + "multiline": False, + "value": "2023-03-15-preview", + "password": False, + "name": "openai_api_version", + "display_name": "OpenAI API Version", + "advanced": False, + "type": "str", + "list": False, + } diff --git a/tests/test_loading.py b/tests/test_loading.py index 872314699..885eb7a82 100644 --- a/tests/test_loading.py +++ b/tests/test_loading.py @@ -2,7 +2,7 @@ import json import pytest from langchain.chains.base import Chain -from langflow import load_flow_from_json +from langflow.processing.process import load_flow_from_json from langflow.graph import Graph from langflow.utils.payload import get_root_node diff --git a/tests/test_prompts_template.py b/tests/test_prompts_template.py index 83da2f14d..a8562898c 100644 --- a/tests/test_prompts_template.py +++ b/tests/test_prompts_template.py @@ -3,7 +3,7 @@ from langflow.settings import settings def test_prompts_settings(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 
json_response = response.json() prompts = json_response["prompts"] @@ -11,7 +11,7 @@ def test_prompts_settings(client: TestClient): def test_prompt_template(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() prompts = json_response["prompts"] @@ -89,7 +89,7 @@ def test_prompt_template(client: TestClient): def test_few_shot_prompt_template(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() prompts = json_response["prompts"] @@ -168,7 +168,7 @@ def test_few_shot_prompt_template(client: TestClient): def test_zero_shot_prompt(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() prompts = json_response["prompts"] diff --git a/tests/test_vectorstore_template.py b/tests/test_vectorstore_template.py index 5b1d7e5bc..0aa823786 100644 --- a/tests/test_vectorstore_template.py +++ b/tests/test_vectorstore_template.py @@ -5,7 +5,7 @@ from langflow.settings import settings # check that all agents are in settings.agents # are in json_response["agents"] def test_vectorstores_settings(client: TestClient): - response = client.get("/all") + response = client.get("api/v1/all") assert response.status_code == 200 json_response = response.json() vectorstores = json_response["vectorstores"] diff --git a/tests/test_websocket.py b/tests/test_websocket.py index 5b60d0fed..611faff79 100644 --- a/tests/test_websocket.py +++ b/tests/test_websocket.py @@ -5,17 +5,17 @@ from fastapi.testclient import TestClient def test_websocket_connection(client: TestClient): - with client.websocket_connect("/chat/test_client") as websocket: + with client.websocket_connect("api/v1/chat/test_client") as websocket: assert websocket.scope["client"] == ["testclient", 50000] - assert 
websocket.scope["path"] == "/chat/test_client" + assert websocket.scope["path"] == "/api/v1/chat/test_client" def test_chat_history(client: TestClient): # Mock the process_graph function to return a specific value - with patch("langflow.api.chat_manager.process_graph") as mock_process_graph: + with patch("langflow.chat.manager.process_graph") as mock_process_graph: mock_process_graph.return_value = ("Hello, I'm a mock response!", "") - with client.websocket_connect("/chat/test_client") as websocket: + with client.websocket_connect("api/v1/chat/test_client") as websocket: # First message should be the history history = websocket.receive_json() assert history == [] # Empty history