Add support for LLaMA as LLM

parent 945f160db7
commit f02f0f9c98

7 changed files with 31 additions and 4 deletions
Dockerfile
@@ -3,7 +3,7 @@ FROM python:3.10-slim
 WORKDIR /app
 
 # Install Poetry
-RUN apt-get update && apt-get install gcc curl -y
+RUN apt-get update && apt-get install gcc g++ curl -y
 RUN curl -sSL https://install.python-poetry.org | python3 -
 # # Add Poetry to PATH
 ENV PATH="${PATH}:/root/.local/bin"
@@ -15,4 +15,7 @@ COPY ./ ./
 # Install dependencies
 RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi
 
+# Set the logging level to DEBUG
+ENV LOG_LEVEL=debug
+
 CMD ["uvicorn", "langflow.main:app", "--host", "0.0.0.0", "--port", "5003", "--reload", "--log-level", "debug"]
docker-compose.yml
@@ -10,6 +10,12 @@ services:
     volumes:
       - ./:/app
     command: bash -c "uvicorn langflow.main:app --host 0.0.0.0 --port 7860 --reload"
+    deploy:
+      resources:
+        limits:
+          cpus: '4'
+          memory: 16G
+
 
   frontend:
     build:
poetry.lock (generated, 18 lines changed)
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand.
 
 [[package]]
 name = "aiohttp"
@@ -1274,6 +1274,20 @@ llms = ["anthropic (>=0.2.4,<0.3.0)", "cohere (>=3,<4)", "huggingface_hub (>=0,<
 openai = ["openai (>=0,<1)"]
 qdrant = ["qdrant-client (>=1.1.1,<2.0.0)"]
 
+[[package]]
+name = "llama-cpp-python"
+version = "0.1.23"
+description = "A Python wrapper for llama.cpp"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "llama_cpp_python-0.1.23.tar.gz", hash = "sha256:323a937e68e04251b5ad1804922e05d15c8b6bfbcf7c3e683a7b39a20e165ebf"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.5.0,<5.0.0"
+
 [[package]]
 name = "markdown-it-py"
 version = "2.2.0"
@@ -2763,4 +2777,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "70e86f7d3b5caed792e37ccf9e11ed95008e5078dd8830e4f8b96cc1d35c7b60"
+content-hash = "a5f1a33bedd704cea56a6c8d3d97c8d8daad4b78f47765cca068f88face28647"
pyproject.toml
@@ -36,6 +36,7 @@ dill = "^0.3.6"
 pandas = "^1.5.3"
 huggingface-hub = "^0.13.3"
 rich = "^13.3.3"
+llama-cpp-python = "0.1.23"
 
 [tool.poetry.group.dev.dependencies]
 black = "^23.1.0"
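For context, a minimal sketch of what the newly pinned llama-cpp-python package does on its own, following the shape of the project's quickstart; the model path is a placeholder and the prompt is illustrative:

from llama_cpp import Llama

# Placeholder path: point this at a local ggml-format LLaMA model file.
llm = Llama(model_path="./models/7B/ggml-model.bin")

# The model object is callable and returns an OpenAI-style completion dict.
output = llm("Q: Name the planets in the solar system. A: ", max_tokens=48, stop=["Q:"])
print(output["choices"][0]["text"])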
@@ -27,6 +27,7 @@ llms:
   # - AzureOpenAI
   - ChatOpenAI
   - HuggingFaceHub
+  - LlamaCpp
 
 tools:
   - Search
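The new list entry corresponds to LangChain's LlamaCpp wrapper, imported in the backend diff below. A hedged sketch of what that component does when used directly (the model path is again a placeholder):

from langchain.llms import LlamaCpp

# Placeholder path; LlamaCpp runs a local ggml model via llama-cpp-python.
llm = LlamaCpp(model_path="./models/7B/ggml-model.bin")
print(llm("What is the capital of France?"))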
@@ -12,12 +12,14 @@ from langchain import (
 )
 from langchain.agents import agent_toolkits
 from langchain.chat_models import ChatOpenAI
+from langchain.llms import LlamaCpp
 
 from langflow.interface.importing.utils import import_class
 
 ## LLM
 llm_type_to_cls_dict = llms.type_to_cls_dict
 llm_type_to_cls_dict["openai-chat"] = ChatOpenAI  # type: ignore
+llm_type_to_cls_dict["llamacpp"] = LlamaCpp  # type: ignore
 
 ## Chain
 chain_type_to_cls_dict: dict[str, Any] = {
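A sketch of how a registry like llm_type_to_cls_dict is consumed: the node type chosen in a flow resolves to a LangChain class, which is then instantiated with the node's parameters. instantiate_llm is a hypothetical helper for illustration, not part of this diff:

from typing import Any

def instantiate_llm(llm_type: str, params: dict[str, Any]):
    # Resolve the node type to its LangChain class
    # (e.g. "llamacpp" -> LlamaCpp, "openai-chat" -> ChatOpenAI).
    llm_cls = llm_type_to_cls_dict[llm_type]
    return llm_cls(**params)

# Hypothetical usage; model_path is a placeholder.
llm = instantiate_llm("llamacpp", {"model_path": "./models/7B/ggml-model.bin"})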
package.json
@@ -59,5 +59,5 @@
       "last 1 safari version"
     ]
   },
-  "proxy": "http://backend:7860"
+  "proxy": "http://127.0.0.1:5003"
 }