From 6309de1654a74e241adf0227185410a07ea89029 Mon Sep 17 00:00:00 2001 From: Gabriel Luiz Freitas Almeida Date: Tue, 5 Sep 2023 11:55:53 -0300 Subject: [PATCH] =?UTF-8?q?=F0=9F=9A=80=20feat:=20add=20Basic=20Chat=20wit?= =?UTF-8?q?h=20Prompt=20and=20History=20(2)=20node?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ℹ️ This commit adds a new node called "Basic Chat with Prompt and History (2)" to the chat application. This node is responsible for creating a simple chat interface with a custom prompt template and a conversational memory buffer. The node configuration includes various properties such as callbacks, cache, client, max_retries, max_tokens, metadata, model_kwargs, model_name, n, openai_api_base, openai_api_key, openai_organization, openai_proxy, and more. This new node enhances the chat functionality by providing a more interactive and dynamic chat experience for users. 📝 chore(tests): add BasicChatwithPromptandHistory.json test data file for testing purposes 🔧 fix(test_endpoints.py): remove unused imports and trailing whitespace ✅ test(test_endpoints.py): add test for basic chat in process endpoint to verify correct response and session_id generation ✅ test(test_endpoints.py): add test for basic chat with different session_ids to verify correct response ✅ test(test_endpoints.py): add test for basic chat with two session_ids and names to verify correct response --- tests/data/BasicChatwithPromptandHistory.json | 1 + tests/test_endpoints.py | 89 +++++++++++++++++++ 2 files changed, 90 insertions(+) create mode 100644 tests/data/BasicChatwithPromptandHistory.json diff --git a/tests/data/BasicChatwithPromptandHistory.json b/tests/data/BasicChatwithPromptandHistory.json new file mode 100644 index 000000000..658ac0479 --- /dev/null +++ b/tests/data/BasicChatwithPromptandHistory.json @@ -0,0 +1 @@ +{"description":"A simple chat with a custom prompt template and conversational memory buffer","name":"Basic Chat 
with Prompt and History (2)","data":{"nodes":[{"width":384,"height":621,"id":"ChatOpenAI-vy7fV","type":"genericNode","position":{"x":170.87326389541306,"y":465.8628482073749},"data":{"type":"ChatOpenAI","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"dynamic":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"cache":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"cache","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"client":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"client","advanced":false,"dynamic":false,"info":"","type":"Any","list":false},"max_retries":{"required":false,"placeholder":"","show":false,"multiline":false,"value":6,"password":false,"name":"max_retries","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"max_tokens":{"required":false,"placeholder":"","show":true,"multiline":false,"password":true,"name":"max_tokens","advanced":false,"dynamic":false,"info":"","type":"int","list":false,"value":""},"metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"model_kwargs":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"model_kwargs","advanced":true,"dynamic":false,"info":"","type":"code","list":false},"model_name":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"gpt-3.5-turbo","password":false,"options":["gpt-3.5-turbo-0613","gpt-3.5-turbo","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-16k","gpt-4-0613","gpt-4-32k-0613","gpt-4","gpt-4-32k"],"name":"model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"n":{"required":false,"placeholder":"","show":false,"multiline":false,"value":1,"
password":false,"name":"n","advanced":false,"dynamic":false,"info":"","type":"int","list":false},"openai_api_base":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"openai_api_base","display_name":"OpenAI API Base","advanced":false,"dynamic":false,"info":"\nThe base URL of the OpenAI API. Defaults to https://api.openai.com/v1.\n\nYou can change this to use other APIs like JinaChat, LocalAI and Prem.\n","type":"str","list":false},"openai_api_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":true,"name":"openai_api_key","display_name":"OpenAI API Key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_organization":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_organization","display_name":"OpenAI Organization","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"openai_proxy":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"openai_proxy","display_name":"OpenAI 
Proxy","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"request_timeout":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"request_timeout","advanced":false,"dynamic":false,"info":"","type":"float","list":false,"value":60},"streaming":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"streaming","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"temperature":{"required":false,"placeholder":"","show":true,"multiline":false,"value":0.7,"password":false,"name":"temperature","advanced":false,"dynamic":false,"info":"","type":"float","list":false},"tiktoken_model_name":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tiktoken_model_name","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"ChatOpenAI"},"description":"`OpenAI` Chat large language models 
API.","base_classes":["ChatOpenAI","BaseChatModel","BaseLanguageModel","BaseLLM"],"display_name":"ChatOpenAI","documentation":"https://python.langchain.com/docs/modules/model_io/models/chat/integrations/openai"},"id":"ChatOpenAI-vy7fV","value":null},"selected":true,"dragging":false,"positionAbsolute":{"x":170.87326389541306,"y":465.8628482073749}},{"width":384,"height":307,"id":"LLMChain-UjBh1","type":"genericNode","position":{"x":1250.1806448178158,"y":588.4657451068704},"data":{"type":"LLMChain","node":{"template":{"callbacks":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"callbacks","advanced":false,"dynamic":false,"info":"","type":"langchain.callbacks.base.BaseCallbackHandler","list":true},"llm":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"llm","advanced":false,"dynamic":false,"info":"","type":"BaseLanguageModel","list":false},"memory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"memory","advanced":false,"dynamic":false,"info":"","type":"BaseMemory","list":false},"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"dynamic":false,"info":"","type":"BaseLLMOutputParser","list":false},"prompt":{"required":true,"placeholder":"","show":true,"multiline":false,"password":false,"name":"prompt","advanced":false,"dynamic":false,"info":"","type":"BasePromptTemplate","list":false},"llm_kwargs":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"llm_kwargs","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"metadata":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"metadata","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"output_key":{"required":true,"placeholder":"","show":true,"multiline":false,"value":"text","password":false,"name":
"output_key","advanced":true,"dynamic":false,"info":"","type":"str","list":false},"return_final_only":{"required":false,"placeholder":"","show":false,"multiline":false,"value":true,"password":false,"name":"return_final_only","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"tags":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"tags","advanced":false,"dynamic":false,"info":"","type":"str","list":true},"verbose":{"required":false,"placeholder":"","show":false,"multiline":false,"value":false,"password":false,"name":"verbose","advanced":true,"dynamic":false,"info":"","type":"bool","list":false},"_type":"LLMChain"},"description":"Chain to run queries against LLMs.","base_classes":["LLMChain","Chain","function"],"display_name":"LLMChain","documentation":"https://python.langchain.com/docs/modules/chains/foundational/llm_chain"},"id":"LLMChain-UjBh1","value":null},"selected":false,"positionAbsolute":{"x":1250.1806448178158,"y":588.4657451068704},"dragging":false},{"width":384,"height":273,"id":"PromptTemplate-5Q0W8","type":"genericNode","position":{"x":172.18064481781585,"y":67.26574510687044},"data":{"type":"PromptTemplate","node":{"template":{"output_parser":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"output_parser","advanced":false,"dynamic":false,"info":"","type":"BaseOutputParser","list":false},"input_variables":{"required":true,"placeholder":"","show":false,"multiline":false,"password":false,"name":"input_variables","advanced":false,"dynamic":false,"info":"","type":"str","list":true,"value":["history","text"]},"partial_variables":{"required":false,"placeholder":"","show":false,"multiline":false,"password":false,"name":"partial_variables","advanced":false,"dynamic":false,"info":"","type":"code","list":false},"template":{"required":true,"placeholder":"","show":true,"multiline":true,"password":false,"name":"template","advanced":false,"dynamic":false,"info":"","
type":"prompt","list":false,"value":"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\n{history}\nHuman: {text}\nAI:"},"template_format":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"f-string","password":false,"name":"template_format","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"validate_template":{"required":false,"placeholder":"","show":false,"multiline":false,"value":true,"password":false,"name":"validate_template","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"PromptTemplate","history":{"required":false,"placeholder":"","show":true,"multiline":true,"value":"","password":false,"name":"history","display_name":"history","advanced":false,"input_types":["Document","BaseOutputParser"],"dynamic":false,"info":"","type":"str","list":false},"text":{"required":false,"placeholder":"","show":true,"multiline":true,"value":"","password":false,"name":"text","display_name":"text","advanced":false,"input_types":["Document","BaseOutputParser"],"dynamic":false,"info":"","type":"str","list":false}},"description":"A prompt template for a language 
model.","base_classes":["StringPromptTemplate","PromptTemplate","BasePromptTemplate"],"name":"","display_name":"PromptTemplate","documentation":"https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/","custom_fields":{"template":["history","text"]},"output_types":[],"field_formatters":{"formatters":{"openai_api_key":{}},"base_formatters":{"kwargs":{},"optional":{},"list":{},"dict":{},"union":{},"multiline":{},"show":{},"password":{},"default":{},"headers":{},"dict_code_file":{},"model_fields":{"MODEL_DICT":{"OpenAI":["text-davinci-003","text-davinci-002","text-curie-001","text-babbage-001","text-ada-001"],"ChatOpenAI":["gpt-3.5-turbo-0613","gpt-3.5-turbo","gpt-3.5-turbo-16k-0613","gpt-3.5-turbo-16k","gpt-4-0613","gpt-4-32k-0613","gpt-4","gpt-4-32k"],"Anthropic":["claude-v1","claude-v1-100k","claude-instant-v1","claude-instant-v1-100k","claude-v1.3","claude-v1.3-100k","claude-v1.2","claude-v1.0","claude-instant-v1.1","claude-instant-v1.1-100k","claude-instant-v1.0"],"ChatAnthropic":["claude-v1","claude-v1-100k","claude-instant-v1","claude-instant-v1-100k","claude-v1.3","claude-v1.3-100k","claude-v1.2","claude-v1.0","claude-instant-v1.1","claude-instant-v1.1-100k","claude-instant-v1.0"]}}}},"beta":false,"error":null},"id":"PromptTemplate-5Q0W8","value":null},"selected":false,"dragging":false,"positionAbsolute":{"x":172.18064481781585,"y":67.26574510687044}},{"width":384,"height":561,"id":"ConversationBufferMemory-Lu2Nb","type":"genericNode","position":{"x":802.1806448178158,"y":43.265745106870426},"data":{"type":"ConversationBufferMemory","node":{"template":{"chat_memory":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"chat_memory","advanced":false,"dynamic":false,"info":"","type":"BaseChatMessageHistory","list":false},"ai_prefix":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"AI","password":false,"name":"ai_prefix","advanced":false,"dynamic":false,"info":"","type":"str","list"
:false},"human_prefix":{"required":false,"placeholder":"","show":false,"multiline":false,"value":"Human","password":false,"name":"human_prefix","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"input_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"input_key","advanced":false,"dynamic":false,"info":"The variable to be used as Chat Input when more than one variable is available.","type":"str","list":false},"memory_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"history","password":false,"name":"memory_key","advanced":false,"dynamic":false,"info":"","type":"str","list":false},"output_key":{"required":false,"placeholder":"","show":true,"multiline":false,"value":"","password":false,"name":"output_key","advanced":false,"dynamic":false,"info":"The variable to be used as Chat Output (e.g. answer in a ConversationalRetrievalChain)","type":"str","list":false},"return_messages":{"required":false,"placeholder":"","show":true,"multiline":false,"password":false,"name":"return_messages","advanced":false,"dynamic":false,"info":"","type":"bool","list":false},"_type":"ConversationBufferMemory"},"description":"Buffer for storing conversation 
memory.","base_classes":["BaseMemory","ConversationBufferMemory","BaseChatMemory"],"display_name":"ConversationBufferMemory","documentation":"https://python.langchain.com/docs/modules/memory/how_to/buffer"},"id":"ConversationBufferMemory-Lu2Nb","value":null},"selected":false,"positionAbsolute":{"x":802.1806448178158,"y":43.265745106870426},"dragging":false}],"edges":[{"source":"ChatOpenAI-vy7fV","sourceHandle":"ChatOpenAI|ChatOpenAI-vy7fV|ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLLM","target":"LLMChain-UjBh1","targetHandle":"BaseLanguageModel|llm|LLMChain-UjBh1","className":"","id":"reactflow__edge-ChatOpenAI-vy7fVChatOpenAI|ChatOpenAI-vy7fV|ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLLM-LLMChain-UjBh1BaseLanguageModel|llm|LLMChain-UjBh1","selected":false,"animated":false,"style":{"stroke":"#555"}},{"source":"PromptTemplate-5Q0W8","sourceHandle":"PromptTemplate|PromptTemplate-5Q0W8|StringPromptTemplate|PromptTemplate|BasePromptTemplate","target":"LLMChain-UjBh1","targetHandle":"BasePromptTemplate|prompt|LLMChain-UjBh1","className":"","id":"reactflow__edge-PromptTemplate-5Q0W8PromptTemplate|PromptTemplate-5Q0W8|StringPromptTemplate|PromptTemplate|BasePromptTemplate-LLMChain-UjBh1BasePromptTemplate|prompt|LLMChain-UjBh1","animated":false,"style":{"stroke":"#555"}},{"source":"ConversationBufferMemory-Lu2Nb","sourceHandle":"ConversationBufferMemory|ConversationBufferMemory-Lu2Nb|BaseMemory|ConversationBufferMemory|BaseChatMemory","target":"LLMChain-UjBh1","targetHandle":"BaseMemory|memory|LLMChain-UjBh1","className":"","id":"reactflow__edge-ConversationBufferMemory-Lu2NbConversationBufferMemory|ConversationBufferMemory-Lu2Nb|BaseMemory|ConversationBufferMemory|BaseChatMemory-LLMChain-UjBh1BaseMemory|memory|LLMChain-UjBh1","animated":false,"style":{"stroke":"#555"}}],"viewport":{"x":-64.70809474436828,"y":44.7801470275611,"zoom":0.6622606580990782}},"id":"0cdfb2f2-19de-4e15-99fa-fd5203b38053"} \ No newline at end of file diff --git a/tests/test_endpoints.py 
b/tests/test_endpoints.py index 376de6f4e..29c5d5a91 100644 --- a/tests/test_endpoints.py +++ b/tests/test_endpoints.py @@ -360,3 +360,92 @@ def test_various_prompts(client, prompt, expected_input_variables): response = client.post("api/v1/validate/prompt", json=PROMPT_REQUEST) assert response.status_code == 200 assert response.json()["input_variables"] == expected_input_variables + + +def test_basic_chat_in_process(client, added_flow, created_api_key): + # Run the /api/v1/process/{flow_id} endpoint + headers = {"x-api-key": created_api_key.api_key} + post_data = {"inputs": {"text": "Hi, My name is Gabriel"}} + response = client.post( + f"api/v1/process/{added_flow.get('id')}", + headers=headers, + json=post_data, + ) + assert response.status_code == 200, response.json() + # Check the response + assert "Gabriel" in response.json()["result"]["text"] + # session_id should be returned + assert "session_id" in response.json() + assert response.json()["session_id"] is not None + # New request with the same session_id + # asking "What is my name?" 
should return "Gabriel" + post_data = { + "inputs": {"text": "What is my name?"}, + "session_id": response.json()["session_id"], + } + response = client.post( + f"api/v1/process/{added_flow.get('id')}", + headers=headers, + json=post_data, + ) + assert response.status_code == 200, response.json() + assert "Gabriel" in response.json()["result"]["text"] + + +def test_basic_chat_different_session_ids(client, added_flow, created_api_key): + # Run the /api/v1/process/{flow_id} endpoint + headers = {"x-api-key": created_api_key.api_key} + post_data = {"inputs": {"text": "Hi, My name is Gabriel"}} + response = client.post( + f"api/v1/process/{added_flow.get('id')}", + headers=headers, + json=post_data, + ) + assert response.status_code == 200, response.json() + # Check the response + assert "Gabriel" in response.json()["result"]["text"] + # session_id should be returned + assert "session_id" in response.json() + assert response.json()["session_id"] is not None + # New request with a different session_id + # asking "What is my name?" 
should return "Gabriel" + post_data = { + "inputs": {"text": "What is my name?"}, + "session_id": "other session id", + } + response = client.post( + f"api/v1/process/{added_flow.get('id')}", + headers=headers, + json=post_data, + ) + assert response.status_code == 200, response.json() + assert "Gabriel" not in response.json()["result"]["text"] + + +@pytest.mark.parametrize("name", ["Gabriel", "John"]) +def test_basic_chat_with_two_session_ids_and_names( + client, added_flow, created_api_key, name +): + headers = {"x-api-key": created_api_key.api_key} + + def run_api_post(endpoint, headers, post_data): + response = client.post(endpoint, headers=headers, json=post_data) + assert response.status_code == 200, response.json() + return response.json() + + def assert_name_and_session_in_response(name, response_json): + assert name in response_json["result"]["text"] + assert "session_id" in response_json + assert response_json["session_id"] is not None + + # Run the /api/v1/process/{flow_id} endpoint + flow_id = added_flow.get("id") + post_data = {"inputs": {"text": f"Hi, My name is {name}"}} + response_json = run_api_post(f"api/v1/process/{flow_id}", headers, post_data) + assert_name_and_session_in_response(name, response_json) + session_id = response_json["session_id"] + + # New request with the same session_id asking "What is my name?" should return name + post_data = {"inputs": {"text": "What is my name?"}, "session_id": session_id} + response_json = run_api_post(f"api/v1/process/{flow_id}", headers, post_data) + assert name in response_json["result"]["text"]