From c979823d58e5b021679239cbe1e2eb88042566a2 Mon Sep 17 00:00:00 2001
From: yamonkjd
Date: Thu, 21 Dec 2023 17:59:15 +0900
Subject: [PATCH 1/5] Create ChatOllamaEndpoint.py

This is a draft of a custom component to access the Ollama API endpoint.
---
 .../components/llms/ChatOllamaEndpoint.py     | 212 ++++++++++++++++++
 1 file changed, 212 insertions(+)
 create mode 100644 src/backend/langflow/components/llms/ChatOllamaEndpoint.py

diff --git a/src/backend/langflow/components/llms/ChatOllamaEndpoint.py b/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
new file mode 100644
index 000000000..034c8c9d1
--- /dev/null
+++ b/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
@@ -0,0 +1,212 @@
+from typing import Optional, List
+from langchain.chat_models.base import BaseChatModel
+from langchain_community.chat_models import ChatOllama
+from langflow import CustomComponent
+from langchain.callbacks.manager import CallbackManager
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+
+
+class ChatOllamaComponent(CustomComponent):
+    display_name = "ChatOllama"
+    description = "Local LLM for chat with Ollama."
+
+    def build_config(self) -> dict:
+        return {
+            "base_url": {
+                "display_name": "Base URL",
+                "value": "http://localhost:11434",
+                "info": "Endpoint of the Ollama API."
+            },
+            "model": {
+                "display_name": "Model Name",
+                "value": "llama2",
+                "info": "Refer to https://ollama.ai/library for more models."
+            },
+            "temperature": {
+                "display_name": "Temperature",
+                "field_type": "float",
+                "value": 0.8,
+                "info": "Controls the creativity of model responses."
+            },
+            "cache": {
+                "display_name": "Cache",
+                "field_type": "bool",
+                "info": "Enable or disable caching.",
+                "advanced": True,
+                "value": False
+            },
+            "callback_manager": {
+                "display_name": "Callback Manager",
+                "info": "Optional callback manager for additional functionality.",
+                "advanced": True,
+                "value": None
+            },
+            "callbacks": {
+                "display_name": "Callbacks",
+                "info": "Callbacks to execute during model runtime.",
+                "advanced": True,
+                "value": None
+            },
+            "format": {
+                "display_name": "Format",
+                "field_type": "str",
+                "info": "Specify the format of the output (e.g., json).",
+                "advanced": True,
+                "value": None
+            },
+            "metadata": {
+                "display_name": "Metadata",
+                "info": "Metadata to add to the run trace.",
+                "advanced": True,
+                "value": None
+            },
+            "mirostat": {
+                "display_name": "Mirostat",
+                "field_type": "int",
+                "info": "Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)",
+                "advanced": True,
+                "value": 0
+            },
+            "mirostat_eta": {
+                "display_name": "Mirostat Eta",
+                "field_type": "float",
+                "info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
+                "advanced": True,
+                "value": 0.1
+            },
+            "mirostat_tau": {
+                "display_name": "Mirostat Tau",
+                "field_type": "float",
+                "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
+                "advanced": True,
+                "value": 5.0
+            },
+            "num_ctx": {
+                "display_name": "Context Window Size",
+                "field_type": "int",
+                "info": "Size of the context window for generating tokens. (Default: 2048)",
+                "advanced": True,
+                "value": 2048
+            },
+            "num_gpu": {
+                "display_name": "Number of GPUs",
+                "field_type": "int",
+                "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
+                "advanced": True,
+                "value": 0
+            },
+            "num_thread": {
+                "display_name": "Number of Threads",
+                "field_type": "int",
+                "info": "Number of threads to use during computation. (Default: detected for optimal performance)",
+                "advanced": True,
+                "value": None
+            },
+            "repeat_last_n": {
+                "display_name": "Repeat Last N",
+                "field_type": "int",
+                "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
+                "advanced": True,
+                "value": 64
+            },
+            "repeat_penalty": {
+                "display_name": "Repeat Penalty",
+                "field_type": "float",
+                "info": "Penalty for repetitions in generated text. (Default: 1.1)",
+                "advanced": True,
+                "value": 1.1
+            },
+            "tfs_z": {
+                "display_name": "TFS Z",
+                "field_type": "float",
+                "info": "Tail free sampling value. (Default: 1)",
+                "advanced": True,
+                "value": 1.0
+            },
+            "timeout": {
+                "display_name": "Timeout",
+                "field_type": "int",
+                "info": "Timeout for the request stream.",
+                "advanced": True,
+                "value": None
+            },
+            "top_k": {
+                "display_name": "Top K",
+                "field_type": "int",
+                "info": "Limits token selection to top K. (Default: 40)",
+                "advanced": True,
+                "value": 40
+            },
+            "top_p": {
+                "display_name": "Top P",
+                "field_type": "float",
+                "info": "Works together with top-k. (Default: 0.9)",
+                "advanced": True,
+                "value": 0.9
+            },
+            "verbose": {
+                "display_name": "Verbose",
+                "field_type": "bool",
+                "info": "Whether to print out response text.",
+                "value": None
+            },
+            "tags": {
+                "display_name": "Tags",
+                "field_type": "list",
+                "info": "Tags to add to the run trace.",
+                "advanced": True,
+                "value": None
+            },
+        }
+
+    def build(self, base_url: str, model: str, mirostat: Optional[int],
+              mirostat_eta: Optional[float], mirostat_tau: Optional[float],
+              num_ctx: Optional[int], num_gpu: Optional[int],
+              repeat_last_n: Optional[int],
+              repeat_penalty: Optional[float], temperature: Optional[float],
+              tfs_z: Optional[float],
+              num_thread: Optional[int] = None,
+              stop: Optional[List[str]] = None,
+              tags: Optional[List[str]] = None,
+              system: Optional[str] = None,
+              template: Optional[str] = None,
+              timeout: Optional[int] = None,
+              top_k: Optional[int] = None,
+              top_p: Optional[int] = None, verbose: Optional[bool] = None
+              ) -> BaseChatModel:
+
+        callback_manager = CallbackManager(
+            [StreamingStdOutCallbackHandler()])
+
+        llm_params = {
+            "base_url": base_url,
+            "model": model,
+            "mirostat": mirostat,
+            "mirostat_eta": mirostat_eta,
+            "mirostat_tau": mirostat_tau,
+            "num_ctx": num_ctx,
+            "num_gpu": num_gpu,
+            "num_thread": num_thread,
+            "repeat_last_n": repeat_last_n,
+            "repeat_penalty": repeat_penalty,
+            "temperature": temperature,
+            "stop": stop,
+            "system": system,
+            "template": template,
+            "tfs_z": tfs_z,
+            "timeout": timeout,
+            "top_k": top_k,
+            "top_p": top_p,
+            "verbose": verbose,
+            "callback_manager": callback_manager
+        }
+
+        # None Value Remove
+        llm_params = {k: v for k, v in llm_params.items() if v is not None}
+
+        try:
+            output = ChatOllama(**llm_params)
+        except Exception as e:
+            raise ValueError("Could not initialize Ollama LLM.") from e
+
+        return output
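A quick way to smoke-test this first draft outside the Langflow UI. This is a minimal sketch, not part of the patch: it assumes a local Ollama server on the default port with the llama2 model already pulled, and that the component class can be instantiated directly from the source tree.

    # Hypothetical smoke test; requires `ollama serve` and `ollama pull llama2`.
    from langflow.components.llms.ChatOllamaEndpoint import ChatOllamaComponent

    component = ChatOllamaComponent()
    # Every parameter before num_thread is required in this draft signature.
    llm = component.build(
        base_url="http://localhost:11434", model="llama2",
        mirostat=0, mirostat_eta=0.1, mirostat_tau=5.0,
        num_ctx=2048, num_gpu=0, repeat_last_n=64,
        repeat_penalty=1.1, temperature=0.8, tfs_z=1.0,
    )
    print(llm.invoke("Say hello in one word."))
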
From bea506523743b504de30b57d9a8523011660d80d Mon Sep 17 00:00:00 2001
From: yamonkjd
Date: Sat, 23 Dec 2023 03:57:38 +0900
Subject: [PATCH 2/5] Update ChatOllamaEndpoint.py

---
 .../components/llms/ChatOllamaEndpoint.py     | 159 +++++++++++------
 1 file changed, 99 insertions(+), 60 deletions(-)

diff --git a/src/backend/langflow/components/llms/ChatOllamaEndpoint.py b/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
index 034c8c9d1..a6f79c332 100644
--- a/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
+++ b/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
@@ -1,9 +1,13 @@
-from typing import Optional, List
+from typing import Optional, List, Dict, Any
 from langchain.chat_models.base import BaseChatModel
-from langchain_community.chat_models import ChatOllama
+
+# from langchain_community.chat_models import ChatOllama
+from langchain.chat_models import ChatOllama
 from langflow import CustomComponent
-from langchain.callbacks.manager import CallbackManager
-from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+
+
+# When a callback component is added to Langflow, uncomment this import.
+# from langchain.callbacks.manager import CallbackManager
 
 
 class ChatOllamaComponent(CustomComponent):
@@ -14,174 +20,208 @@ class ChatOllamaComponent(CustomComponent):
         return {
             "base_url": {
                 "display_name": "Base URL",
-                "value": "http://localhost:11434",
-                "info": "Endpoint of the Ollama API."
+                "info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
             },
             "model": {
                 "display_name": "Model Name",
                 "value": "llama2",
-                "info": "Refer to https://ollama.ai/library for more models."
+                "info": "Refer to https://ollama.ai/library for more models.",
             },
             "temperature": {
                 "display_name": "Temperature",
                 "field_type": "float",
                 "value": 0.8,
-                "info": "Controls the creativity of model responses."
+                "info": "Controls the creativity of model responses.",
             },
             "cache": {
                 "display_name": "Cache",
                 "field_type": "bool",
                 "info": "Enable or disable caching.",
                 "advanced": True,
-                "value": False
-            },
-            "callback_manager": {
-                "display_name": "Callback Manager",
-                "info": "Optional callback manager for additional functionality.",
-                "advanced": True,
-                "value": None
-            },
-            "callbacks": {
-                "display_name": "Callbacks",
-                "info": "Callbacks to execute during model runtime.",
-                "advanced": True,
-                "value": None
+                "value": False,
             },
+            ### Uncomment these fields once a callback component is added to Langflow. ###
+            # "callback_manager": {
+            #     "display_name": "Callback Manager",
+            #     "info": "Optional callback manager for additional functionality.",
+            #     "advanced": True,
+            # },
+            # "callbacks": {
+            #     "display_name": "Callbacks",
+            #     "info": "Callbacks to execute during model runtime.",
+            #     "advanced": True,
+            # },
+            ###############################################################################
             "format": {
                 "display_name": "Format",
                 "field_type": "str",
                 "info": "Specify the format of the output (e.g., json).",
                 "advanced": True,
-                "value": None
             },
             "metadata": {
                 "display_name": "Metadata",
                 "info": "Metadata to add to the run trace.",
                 "advanced": True,
-                "value": None
             },
             "mirostat": {
                 "display_name": "Mirostat",
-                "field_type": "int",
-                "info": "Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)",
+                "options": ["Disabled", "Mirostat", "Mirostat 2.0"],
+                "info": "Enable/disable Mirostat sampling for controlling perplexity.",
+                "value": "Disabled",
                 "advanced": True,
-                "value": 0
             },
             "mirostat_eta": {
                 "display_name": "Mirostat Eta",
                 "field_type": "float",
                 "info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
                 "advanced": True,
-                "value": 0.1
             },
             "mirostat_tau": {
                 "display_name": "Mirostat Tau",
                 "field_type": "float",
                 "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
                 "advanced": True,
-                "value": 5.0
             },
             "num_ctx": {
                 "display_name": "Context Window Size",
                 "field_type": "int",
                 "info": "Size of the context window for generating tokens. (Default: 2048)",
(Default: 2048)", "advanced": True, - "value": 2048 }, "num_gpu": { "display_name": "Number of GPUs", "field_type": "int", "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)", "advanced": True, - "value": 0 }, "num_thread": { "display_name": "Number of Threads", "field_type": "int", "info": "Number of threads to use during computation. (Default: detected for optimal performance)", "advanced": True, - "value": None }, "repeat_last_n": { "display_name": "Repeat Last N", "field_type": "int", "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)", "advanced": True, - "value": 64 }, "repeat_penalty": { "display_name": "Repeat Penalty", "field_type": "float", "info": "Penalty for repetitions in generated text. (Default: 1.1)", "advanced": True, - "value": 1.1 }, "tfs_z": { "display_name": "TFS Z", "field_type": "float", "info": "Tail free sampling value. (Default: 1)", "advanced": True, - "value": 1.0 }, "timeout": { "display_name": "Timeout", "field_type": "int", "info": "Timeout for the request stream.", "advanced": True, - "value": None }, "top_k": { "display_name": "Top K", "field_type": "int", "info": "Limits token selection to top K. (Default: 40)", "advanced": True, - "value": 40 }, "top_p": { "display_name": "Top P", "field_type": "float", "info": "Works together with top-k. (Default: 0.9)", "advanced": True, - "value": 0.9 }, "verbose": { "display_name": "Verbose", "field_type": "bool", "info": "Whether to print out response text.", - "value": None }, "tags": { "display_name": "Tags", "field_type": "list", "info": "Tags to add to the run trace.", "advanced": True, - "value": None + }, + "stop": { + "display_name": "Stop Tokens", + "field_type": "list", + "info": "List of tokens to signal the model to stop generating text.", + "advanced": True, + }, + "system": { + "display_name": "System", + "field_type": "str", + "info": "System to use for generating text.", + "advanced": True, + }, + "template": { + "display_name": "Template", + "field_type": "str", + "info": "Template to use for generating text.", }, } - def build(self, base_url: str, model: str, mirostat: Optional[int], - mirostat_eta: Optional[float], mirostat_tau: Optional[float], - num_ctx: Optional[int], num_gpu: Optional[int], - repeat_last_n: Optional[int], - repeat_penalty: Optional[float], temperature: Optional[float], - tfs_z: Optional[float], - num_thread: Optional[int] = None, - stop: Optional[List[str]] = None, - tags: Optional[List[str]] = None, - system: Optional[str] = None, - template: Optional[str] = None, - timeout: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[int] = None, verbose: Optional[bool] = None - ) -> BaseChatModel: + def build( + self, + base_url: Optional[str], + model: str, + mirostat: Optional[str], + mirostat_eta: Optional[float] = None, + mirostat_tau: Optional[float] = None, + ### When a callback component is added to Langflow, the comment must be uncommented.### + # callback_manager: Optional[CallbackManager] = None, + # callbacks: Optional[List[Callbacks]] = None, + ####################################################################################### + repeat_last_n: Optional[int] = None, + verbose: Optional[bool] = None, + cache: Optional[bool] = None, + num_ctx: Optional[int] = None, + num_gpu: Optional[int] = None, + format: Optional[str] = None, + metadata: Optional[Dict[str, Any]] = None, + num_thread: Optional[int] = None, + repeat_penalty: Optional[float] = None, + stop: 
+        system: Optional[str] = None,
+        tags: Optional[List[str]] = None,
+        temperature: Optional[float] = None,
+        template: Optional[str] = None,
+        tfs_z: Optional[float] = None,
+        timeout: Optional[int] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+    ) -> BaseChatModel:
+        if not base_url:
+            base_url = "http://localhost:11434"
 
-        callback_manager = CallbackManager(
-            [StreamingStdOutCallbackHandler()])
+        # Mapping mirostat settings to their corresponding values
+        mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
+
+        # Default to 0 for 'Disabled'
+        mirostat_value = mirostat_options.get(mirostat, 0)  # type: ignore
+
+        # Set mirostat_eta and mirostat_tau to None if mirostat is disabled
+        if mirostat_value == 0:
+            mirostat_eta = None
+            mirostat_tau = None
 
         llm_params = {
             "base_url": base_url,
+            "cache": cache,
             "model": model,
-            "mirostat": mirostat,
+            "mirostat": mirostat_value,
+            "format": format,
+            "metadata": metadata,
+            "tags": tags,
+            ## Uncomment these entries once a callback component is added to Langflow. ##
+            # "callback_manager": callback_manager,
+            # "callbacks": callbacks,
+            ##############################################################################
             "mirostat_eta": mirostat_eta,
             "mirostat_tau": mirostat_tau,
             "num_ctx": num_ctx,
@@ -198,10 +238,9 @@ class ChatOllamaComponent(CustomComponent):
             "top_k": top_k,
             "top_p": top_p,
             "verbose": verbose,
-            "callback_manager": callback_manager
         }
 
-        # None Value Remove
+        # Remove None values
         llm_params = {k: v for k, v in llm_params.items() if v is not None}
 
         try:
@@ -209,4 +248,4 @@ class ChatOllamaComponent(CustomComponent):
         except Exception as e:
             raise ValueError("Could not initialize Ollama LLM.") from e
 
-        return output
+        return output  # type: ignore
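After this revision most build() parameters are optional keyword arguments, the Mirostat dropdown string is mapped to the integer the API expects, and an empty base URL falls back to the default. A minimal sketch of the updated call, under the same local-server assumptions as the earlier example:

    from langflow.components.llms.ChatOllamaEndpoint import ChatOllamaComponent

    component = ChatOllamaComponent()
    # base_url=None falls back to http://localhost:11434; "Disabled" maps to
    # mirostat=0 and clears mirostat_eta/mirostat_tau before the call.
    llm = component.build(base_url=None, model="llama2", mirostat="Disabled",
                          temperature=0.8)
    print(llm.invoke("Say hello in one word."))
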
+            },
+
+            "mirostat": {
+                "display_name": "Mirostat",
+                "options": ["Disabled", "Mirostat", "Mirostat 2.0"],
+                "info": "Enable/disable Mirostat sampling for controlling perplexity.",
+                "value": "Disabled",
+                "advanced": True
+            },
+            "mirostat_eta": {
+                "display_name": "Mirostat Eta",
+                "field_type": "float",
+                "info": "Learning rate influencing the algorithm's response to feedback.",
+                "advanced": True
+            },
+
+
+            "mirostat_tau": {
+                "display_name": "Mirostat Tau",
+                "field_type": "float",
+                "value": 5.0,
+                "info": "Controls balance between coherence and diversity.",
+                "advanced": True
+            },
+            "num_ctx": {
+                "display_name": "Context Window Size",
+                "field_type": "int",
+                "value": 2048,
+                "info": "Size of the context window for generating the next token.",
+                "advanced": True
+            },
+            "num_gpu": {
+                "display_name": "Number of GPUs",
+                "field_type": "int",
+                "info": "Number of GPUs to use for computation.",
+                "advanced": True
+            },
+            "num_thread": {
+                "display_name": "Number of Threads",
+                "field_type": "int",
+                "info": "Number of threads to use during computation.",
+                "advanced": True
+            },
+            "repeat_last_n": {
+                "display_name": "Repeat Last N",
+                "field_type": "int",
+                "value": 64,
+                "info": "Sets how far back the model looks to prevent repetition.",
+                "advanced": True
+            },
+            "repeat_penalty": {
+                "display_name": "Repeat Penalty",
+                "field_type": "float",
+                "value": 1.1,
+                "info": "Penalty for repetitions in generated text.",
+                "advanced": True
+            },
+
+            "stop": {
+                "display_name": "Stop Tokens",
+
+                "info": "List of tokens to signal the model to stop generating text.",
+                "advanced": True
+            },
+            "tfs_z": {
+                "display_name": "TFS Z",
+                "field_type": "float",
+                "value": 1,
+                "info": "Tail free sampling to reduce impact of less probable tokens.",
+                "advanced": True
+            },
+            "top_k": {
+                "display_name": "Top K",
+                "field_type": "int",
+                "value": 40,
+                "info": "Limits token selection to top K for reducing nonsense generation.",
+                "advanced": True
+            },
+            "top_p": {
+                "display_name": "Top P",
+                "field_type": "float",
+                "value": 0.9,
+                "info": "Works with top-k to control diversity of generated text.",
+                "advanced": True
+            },
+        }
+
+    def build(self, base_url: Optional[str], model: str, mirostat: str, mirostat_eta: Optional[float],
+              mirostat_tau: Optional[float], num_ctx: Optional[int], num_gpu: Optional[int],
+              num_thread: Optional[int], repeat_last_n: Optional[int], repeat_penalty: Optional[float],
+              temperature: Optional[float], stop: Optional[List[str]], tfs_z: Optional[float],
+              top_k: Optional[int], top_p: Optional[float]) -> BaseLLM:
+
+        if not base_url:
+            base_url = "http://localhost:11434"
+
+        mirostat_value = 0  # Default value for 'Disabled'
+
+        # Map the textual option to the corresponding integer
+        if mirostat == "Mirostat":
+            mirostat_value = 1
+        elif mirostat == "Mirostat 2.0":
+            mirostat_value = 2
+
+        params = {k: v for k, v in {
+            'base_url': base_url,
+            'model': model,
+            'mirostat': mirostat_value,
+            'mirostat_eta': mirostat_eta,
+            'mirostat_tau': mirostat_tau,
+            'num_ctx': num_ctx,
+            'num_gpu': num_gpu,
+            'num_thread': num_thread,
+            'repeat_last_n': repeat_last_n,
+            'repeat_penalty': repeat_penalty,
+            'temperature': temperature,
+            'stop': stop,
+            'tfs_z': tfs_z,
+            'top_k': top_k,
+            'top_p': top_p,
+            'streaming': "True"
+        }.items() if v is not None}
+
+        try:
+            llm = Ollama(**params)
+        except Exception as e:
+            raise ValueError("Could not connect to Ollama.") from e
+
+        return llm
From 3b6672e06d1fe897793f35591feee9d2e22cb8b2 Mon Sep 17 00:00:00 2001
From: yamonkjd
Date: Tue, 26 Dec 2023 18:57:27 +0900
Subject: [PATCH 4/5] Update OllamaLLM.py

---
 .../langflow/components/llms/OllamaLLM.py     | 122 +++++++++---------
 1 file changed, 64 insertions(+), 58 deletions(-)

diff --git a/src/backend/langflow/components/llms/OllamaLLM.py b/src/backend/langflow/components/llms/OllamaLLM.py
index 6e67970d5..9bf00ad5d 100644
--- a/src/backend/langflow/components/llms/OllamaLLM.py
+++ b/src/backend/langflow/components/llms/OllamaLLM.py
@@ -14,143 +14,149 @@ class OllamaLLM(CustomComponent):
         return {
             "base_url": {
                 "display_name": "Base URL",
-                "info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified."
+                "info": "Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
             },
             "model": {
                 "display_name": "Model Name",
                 "value": "llama2",
-                "info": "Refer to https://ollama.ai/library for more models."
+                "info": "Refer to https://ollama.ai/library for more models.",
             },
             "temperature": {
                 "display_name": "Temperature",
                 "field_type": "float",
                 "value": 0.8,
-                "info": "Controls the creativity of model responses."
+                "info": "Controls the creativity of model responses.",
             },
-
             "mirostat": {
                 "display_name": "Mirostat",
                 "options": ["Disabled", "Mirostat", "Mirostat 2.0"],
                 "info": "Enable/disable Mirostat sampling for controlling perplexity.",
                 "value": "Disabled",
-                "advanced": True
+                "advanced": True,
             },
             "mirostat_eta": {
                 "display_name": "Mirostat Eta",
                 "field_type": "float",
                 "info": "Learning rate influencing the algorithm's response to feedback.",
-                "advanced": True
+                "advanced": True,
             },
-
-
             "mirostat_tau": {
                 "display_name": "Mirostat Tau",
                 "field_type": "float",
-                "value": 5.0,
                 "info": "Controls balance between coherence and diversity.",
-                "advanced": True
+                "advanced": True,
             },
             "num_ctx": {
                 "display_name": "Context Window Size",
                 "field_type": "int",
-                "value": 2048,
                 "info": "Size of the context window for generating the next token.",
-                "advanced": True
+                "advanced": True,
             },
             "num_gpu": {
                 "display_name": "Number of GPUs",
                 "field_type": "int",
                 "info": "Number of GPUs to use for computation.",
-                "advanced": True
+                "advanced": True,
             },
             "num_thread": {
                 "display_name": "Number of Threads",
                 "field_type": "int",
                 "info": "Number of threads to use during computation.",
-                "advanced": True
+                "advanced": True,
             },
             "repeat_last_n": {
                 "display_name": "Repeat Last N",
                 "field_type": "int",
-                "value": 64,
                 "info": "Sets how far back the model looks to prevent repetition.",
-                "advanced": True
+                "advanced": True,
             },
             "repeat_penalty": {
                 "display_name": "Repeat Penalty",
                 "field_type": "float",
-                "value": 1.1,
                 "info": "Penalty for repetitions in generated text.",
-                "advanced": True
+                "advanced": True,
             },
-
             "stop": {
                 "display_name": "Stop Tokens",
-
                 "info": "List of tokens to signal the model to stop generating text.",
-                "advanced": True
+                "advanced": True,
             },
             "tfs_z": {
                 "display_name": "TFS Z",
                 "field_type": "float",
-                "value": 1,
                 "info": "Tail free sampling to reduce impact of less probable tokens.",
-                "advanced": True
+                "advanced": True,
             },
             "top_k": {
                 "display_name": "Top K",
                 "field_type": "int",
-                "value": 40,
                 "info": "Limits token selection to top K for reducing nonsense generation.",
-                "advanced": True
+                "advanced": True,
             },
             "top_p": {
                 "display_name": "Top P",
                 "field_type": "float",
-                "value": 0.9,
                 "info": "Works with top-k to control diversity of generated text.",
-                "advanced": True
+                "advanced": True,
             },
         }
 
-    def build(self, base_url: Optional[str], model: str, mirostat: str, mirostat_eta: Optional[float],
-              mirostat_tau: Optional[float], num_ctx: Optional[int], num_gpu: Optional[int],
-              num_thread: Optional[int], repeat_last_n: Optional[int], repeat_penalty: Optional[float],
-              temperature: Optional[float], stop: Optional[List[str]], tfs_z: Optional[float],
-              top_k: Optional[int], top_p: Optional[float]) -> BaseLLM:
-
+    def build(
+        self,
+        base_url: Optional[str],
+        model: str,
+        temperature: Optional[float],
+        mirostat: Optional[str],
+        mirostat_eta: Optional[float] = None,
+        mirostat_tau: Optional[float] = None,
+        num_ctx: Optional[int] = None,
+        num_gpu: Optional[int] = None,
+        num_thread: Optional[int] = None,
+        repeat_last_n: Optional[int] = None,
+        repeat_penalty: Optional[float] = None,
+        stop: Optional[List[str]] = None,
+        tfs_z: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+    ) -> BaseLLM:
         if not base_url:
             base_url = "http://localhost:11434"
 
-        mirostat_value = 0  # Default value for 'Disabled'
+        # Mapping mirostat settings to their corresponding values
+        mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
 
-        # Map the textual option to the corresponding integer
-        if mirostat == "Mirostat":
-            mirostat_value = 1
-        elif mirostat == "Mirostat 2.0":
-            mirostat_value = 2
+        # Default to 0 for 'Disabled'
+        mirostat_value = mirostat_options.get(mirostat, 0)  # type: ignore
+
+        # Set mirostat_eta and mirostat_tau to None if mirostat is disabled
+        if mirostat_value == 0:
+            mirostat_eta = None
+            mirostat_tau = None
+
+        llm_params = {
+            "base_url": base_url,
+            "model": model,
+            "mirostat": mirostat_value,
+            "mirostat_eta": mirostat_eta,
+            "mirostat_tau": mirostat_tau,
+            "num_ctx": num_ctx,
+            "num_gpu": num_gpu,
+            "num_thread": num_thread,
+            "repeat_last_n": repeat_last_n,
+            "repeat_penalty": repeat_penalty,
+            "temperature": temperature,
+            "stop": stop,
+            "tfs_z": tfs_z,
+            "top_k": top_k,
+            "top_p": top_p,
+        }
+
+        # Remove None values
+        llm_params = {k: v for k, v in llm_params.items() if v is not None}
 
-        params = {k: v for k, v in {
-            'base_url': base_url,
-            'model': model,
-            'mirostat': mirostat_value,
-            'mirostat_eta': mirostat_eta,
-            'mirostat_tau': mirostat_tau,
-            'num_ctx': num_ctx,
-            'num_gpu': num_gpu,
-            'num_thread': num_thread,
-            'repeat_last_n': repeat_last_n,
-            'repeat_penalty': repeat_penalty,
-            'temperature': temperature,
-            'stop': stop,
-            'tfs_z': tfs_z,
-            'top_k': top_k,
-            'top_p': top_p,
-            'streaming': "True"
-        }.items() if v is not None}
+
         try:
-            llm = Ollama(**params)
+            llm = Ollama(**llm_params)
         except Exception as e:
             raise ValueError("Could not connect to Ollama.") from e
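With this patch the completion-style component mirrors the chat component's keyword signature and None-filtering. A minimal sketch of the revised OllamaLLM.build(), under the same local-server assumptions; note that base_url, model, temperature, and mirostat have no defaults, so all four must be supplied:

    from langflow.components.llms.OllamaLLM import OllamaLLM

    component = OllamaLLM()
    component_llm = component.build(base_url=None, model="llama2",
                                    temperature=0.8, mirostat="Disabled")
    print(component_llm.invoke("The capital of France is"))
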
From e9cd3392784ffa8b7d7674265097d0398cc4906f Mon Sep 17 00:00:00 2001
From: yamonkjd
Date: Tue, 26 Dec 2023 18:57:40 +0900
Subject: [PATCH 5/5] Update ChatOllamaEndpoint.py

---
 src/backend/langflow/components/llms/ChatOllamaEndpoint.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/backend/langflow/components/llms/ChatOllamaEndpoint.py b/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
index a6f79c332..94e9df695 100644
--- a/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
+++ b/src/backend/langflow/components/llms/ChatOllamaEndpoint.py
@@ -163,6 +163,7 @@ class ChatOllamaComponent(CustomComponent):
                 "display_name": "Template",
                 "field_type": "str",
                 "info": "Template to use for generating text.",
+                "advanced": True,
             },
         }
@@ -210,6 +211,7 @@ class ChatOllamaComponent(CustomComponent):
             mirostat_eta = None
             mirostat_tau = None
 
+        # Assemble the parameters to pass to ChatOllama
         llm_params = {
             "base_url": base_url,
             "cache": cache,