support baidu qianfan endpoint for LLM
This commit is contained in:
parent
c8d231c331
commit
851da1cb4e
6 changed files with 426 additions and 331 deletions
4
.gitignore
vendored
4
.gitignore
vendored
|
|
@ -254,4 +254,6 @@ langflow.db
|
|||
|
||||
/tmp/*
|
||||
src/backend/langflow/frontend/
|
||||
.docker
|
||||
.docker
|
||||
|
||||
.idea
|
||||
|
|
@ -217,4 +217,40 @@ Vertex AI is a cloud computing platform offered by Google Cloud Platform (GCP).
|
|||
- **top_k:** How the model selects tokens for output, the next token is selected from – defaults to `40`.
|
||||
- **top_p:** Tokens are selected from most probable to least until the sum of their – defaults to `0.95`.
|
||||
- **tuned_model_name:** The name of a tuned model. If provided, model_name is ignored.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.
|
||||
- **verbose:** This parameter is used to control the level of detail in the output of the chain. When set to True, it will print out some internal states of the chain while it is being run, which can help debug and understand the chain's behavior. If set to False, it will suppress the verbose output – defaults to `False`.
|
||||
|
||||
---
|
||||
|
||||
### QianfanLLMEndpoint
|
||||
|
||||
Wrapper around [Baidu Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html) large language models.
|
||||
|
||||
:::info
|
||||
The Qianfan Big Model Platform is a one-stop platform for enterprise developers to develop and operate large models and services. It provides data management based on ERNIE Bot's underlying model (Ernie Bot), automatic model customization and fine-tuning, and one-stop large-scale model customization services for cloud deployment of prediction services, and provides ERNIE Bot's enterprise level service API that can be quickly called, helping to implement the demand for generative AI applications in various industries.
|
||||
:::
|
||||
|
||||
- **Model Name:** Model name. You can get it from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu; preset models are mapped to an endpoint. `Model Name` will be ignored if `Endpoint` is set.
|
||||
- **Qianfan Ak:** which you could get from https://cloud.baidu.com/product/wenxinworkshop.
|
||||
- **Qianfan Sk:** which you could get from https://cloud.baidu.com/product/wenxinworkshop.
|
||||
- **Top p:** Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. The diversity of the output text is affected, and the larger the value, the stronger the diversity of the generated text - defaults to `0.8`.
|
||||
- **Temperature:** Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. Higher values make the output more random, while lower values make it more concentrated and deterministic - defaults to `0.95`.
|
||||
- **Penalty Score:** Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. By increasing the penalty for generated tokens, the phenomenon of duplicate generation is reduced. A higher value indicates a higher penalty - defaults to `1.0`.
|
||||
- **Endpoint:** Endpoint of the Qianfan LLM, required if custom model used.
|
||||
|
||||
---
|
||||
|
||||
### QianfanChatEndpoint
|
||||
|
||||
Wrapper around [Baidu Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html) chat large language models. This component supports some of the LLMs (Large Language Models) available by Baidu qianfan and is used for tasks such as chatbots, Generative Question-Answering (GQA), and summarization.
|
||||
|
||||
:::info
|
||||
The Qianfan Big Model Platform is a one-stop platform for enterprise developers to develop and operate large models and services. It provides data management based on ERNIE Bot's underlying model (Ernie Bot), automatic model customization and fine-tuning, and one-stop large-scale model customization services for cloud deployment of prediction services, and provides ERNIE Bot's enterprise level service API that can be quickly called, helping to implement the demand for generative AI applications in various industries.
|
||||
:::
|
||||
|
||||
- **Model Name:** Model name. You can get it from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu; preset models are mapped to an endpoint. `Model Name` will be ignored if `Endpoint` is set.
|
||||
- **Qianfan Ak:** which you could get from https://cloud.baidu.com/product/wenxinworkshop.
|
||||
- **Qianfan Sk:** which you could get from https://cloud.baidu.com/product/wenxinworkshop.
|
||||
- **Top p:** Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. The diversity of the output text is affected, and the larger the value, the stronger the diversity of the generated text - defaults to `0.8`.
|
||||
- **Temperature:** Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. Higher values make the output more random, while lower values make it more concentrated and deterministic - defaults to `0.95`.
|
||||
- **Penalty Score:** Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. By increasing the penalty for generated tokens, the phenomenon of duplicate generation is reduced. A higher value indicates a higher penalty - defaults to `1.0`.
|
||||
- **Endpoint:** Endpoint of the Qianfan LLM, required if custom model used.
|
||||
530
poetry.lock
generated
530
poetry.lock
generated
File diff suppressed because it is too large
Load diff
|
|
@ -91,6 +91,7 @@ pillow = "^10.0.0"
|
|||
metal-sdk = "^2.2.0"
|
||||
markupsafe = "^2.1.3"
|
||||
numexpr = "^2.8.6"
|
||||
qianfan = "0.0.5"
|
||||
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,92 @@
|
|||
from typing import Optional
|
||||
from langflow import CustomComponent
|
||||
from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
|
||||
from langchain.llms.base import BaseLLM
|
||||
|
||||
|
||||
class QianfanChatEndpointComponent(CustomComponent):
    """Langflow component that builds a LangChain ``QianfanChatEndpoint``.

    Exposes Baidu Qianfan's hosted chat models (ERNIE-Bot family, Llama-2
    chat variants, etc.) as a drag-and-drop component.  The component only
    forwards its configuration to LangChain; no Qianfan calls are made until
    the resulting model object is used.
    """

    display_name: str = "QianfanChatEndpoint"
    description: str = (
        "Baidu Qianfan chat models. Get more detail from "
        "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint."
    )

    def build_config(self):
        """Return the field configuration shown in the Langflow UI.

        Help texts and the model list are hoisted into locals so each string
        is written once; the returned dict is identical to spelling them out
        inline.
        """
        # Shared help text for the three ERNIE-only sampling parameters.
        sampling_info = "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo"
        # Both credentials come from the same Qianfan console page.
        credential_info = "which you could get from https://cloud.baidu.com/product/wenxinworkshop"
        docs_url = "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint"
        model_options = [
            "ERNIE-Bot",
            "ERNIE-Bot-turbo",
            "BLOOMZ-7B",
            "Llama-2-7b-chat",
            "Llama-2-13b-chat",
            "Llama-2-70b-chat",
            "Qianfan-BLOOMZ-7B-compressed",
            "Qianfan-Chinese-Llama-2-7B",
            "ChatGLM2-6B-32K",
            "AquilaChat-7B",
        ]
        return {
            "model": {
                "display_name": "Model Name",
                "options": model_options,
                "info": docs_url,
                "required": True,
            },
            "qianfan_ak": {
                "display_name": "Qianfan Ak",
                "required": True,
                "password": True,
                "info": credential_info,
            },
            "qianfan_sk": {
                "display_name": "Qianfan Sk",
                "required": True,
                "password": True,
                "info": credential_info,
            },
            "top_p": {
                "display_name": "Top p",
                "field_type": "float",
                "info": sampling_info,
                "value": 0.8,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "info": sampling_info,
                "value": 0.95,
            },
            "penalty_score": {
                "display_name": "Penalty Score",
                "field_type": "float",
                "info": sampling_info,
                "value": 1.0,
            },
            "endpoint": {
                "display_name": "Endpoint",
                "info": "Endpoint of the Qianfan LLM, required if custom model used.",
            },
            "code": {"show": False},
        }

    def build(
        self,
        model: str = "ERNIE-Bot-turbo",
        qianfan_ak: Optional[str] = None,
        qianfan_sk: Optional[str] = None,
        top_p: Optional[float] = None,
        temperature: Optional[float] = None,
        penalty_score: Optional[float] = None,
        endpoint: Optional[str] = None,
    ) -> BaseLLM:
        """Instantiate a ``QianfanChatEndpoint`` from the configured fields.

        Args:
            model: Preset Qianfan model name; ignored when ``endpoint`` is set.
            qianfan_ak: Qianfan access key.
            qianfan_sk: Qianfan secret key.
            top_p: Nucleus-sampling parameter (ERNIE-Bot family only).
            temperature: Sampling temperature (ERNIE-Bot family only).
            penalty_score: Repetition penalty (ERNIE-Bot family only).
            endpoint: Custom endpoint; required for non-preset models.

        Raises:
            ValueError: If constructing the endpoint object fails; the
                original exception is chained as the cause.
        """
        params = dict(
            model=model,
            qianfan_ak=qianfan_ak,
            qianfan_sk=qianfan_sk,
            top_p=top_p,
            temperature=temperature,
            penalty_score=penalty_score,
            endpoint=endpoint,
        )
        try:
            chat_model = QianfanChatEndpoint(**params)
        except Exception as e:
            raise ValueError("Could not connect to Baidu Qianfan API.") from e
        return chat_model
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
from typing import Optional
|
||||
from langflow import CustomComponent
|
||||
from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
|
||||
from langchain.llms.base import BaseLLM
|
||||
|
||||
|
||||
class QianfanLLMEndpointComponent(CustomComponent):
    """Langflow component that builds a LangChain ``QianfanLLMEndpoint``.

    Exposes Baidu Qianfan's hosted completion models (ERNIE-Bot family,
    Llama-2 chat variants, etc.) as a drag-and-drop component.  The component
    only forwards its configuration to LangChain; no Qianfan calls are made
    until the resulting model object is used.
    """

    display_name: str = "QianfanLLMEndpoint"
    description: str = (
        "Baidu Qianfan hosted open source or customized models. "
        "Get more detail from https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint"
    )

    def build_config(self):
        """Return the field configuration shown in the Langflow UI.

        Help texts and the model list are hoisted into locals so each string
        is written once; the returned dict is identical to spelling them out
        inline.
        """
        # Shared help text for the three ERNIE-only sampling parameters.
        sampling_info = "Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo"
        # Both credentials come from the same Qianfan console page.
        credential_info = "which you could get from https://cloud.baidu.com/product/wenxinworkshop"
        docs_url = "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint"
        model_options = [
            "ERNIE-Bot",
            "ERNIE-Bot-turbo",
            "BLOOMZ-7B",
            "Llama-2-7b-chat",
            "Llama-2-13b-chat",
            "Llama-2-70b-chat",
            "Qianfan-BLOOMZ-7B-compressed",
            "Qianfan-Chinese-Llama-2-7B",
            "ChatGLM2-6B-32K",
            "AquilaChat-7B",
        ]
        return {
            "model": {
                "display_name": "Model Name",
                "options": model_options,
                "info": docs_url,
                "required": True,
            },
            "qianfan_ak": {
                "display_name": "Qianfan Ak",
                "required": True,
                "password": True,
                "info": credential_info,
            },
            "qianfan_sk": {
                "display_name": "Qianfan Sk",
                "required": True,
                "password": True,
                "info": credential_info,
            },
            "top_p": {
                "display_name": "Top p",
                "field_type": "float",
                "info": sampling_info,
                "value": 0.8,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "info": sampling_info,
                "value": 0.95,
            },
            "penalty_score": {
                "display_name": "Penalty Score",
                "field_type": "float",
                "info": sampling_info,
                "value": 1.0,
            },
            "endpoint": {
                "display_name": "Endpoint",
                "info": "Endpoint of the Qianfan LLM, required if custom model used.",
            },
            "code": {"show": False},
        }

    def build(
        self,
        model: str = "ERNIE-Bot-turbo",
        qianfan_ak: Optional[str] = None,
        qianfan_sk: Optional[str] = None,
        top_p: Optional[float] = None,
        temperature: Optional[float] = None,
        penalty_score: Optional[float] = None,
        endpoint: Optional[str] = None,
    ) -> BaseLLM:
        """Instantiate a ``QianfanLLMEndpoint`` from the configured fields.

        Args:
            model: Preset Qianfan model name; ignored when ``endpoint`` is set.
            qianfan_ak: Qianfan access key.
            qianfan_sk: Qianfan secret key.
            top_p: Nucleus-sampling parameter (ERNIE-Bot family only).
            temperature: Sampling temperature (ERNIE-Bot family only).
            penalty_score: Repetition penalty (ERNIE-Bot family only).
            endpoint: Custom endpoint; required for non-preset models.

        Raises:
            ValueError: If constructing the endpoint object fails; the
                original exception is chained as the cause.
        """
        params = dict(
            model=model,
            qianfan_ak=qianfan_ak,
            qianfan_sk=qianfan_sk,
            top_p=top_p,
            temperature=temperature,
            penalty_score=penalty_score,
            endpoint=endpoint,
        )
        try:
            llm = QianfanLLMEndpoint(**params)
        except Exception as e:
            raise ValueError("Could not connect to Baidu Qianfan API.") from e
        return llm
|
||||
Loading…
Add table
Add a link
Reference in a new issue