diff --git a/CODEOWNERS b/CODEOWNERS
index 0877a44f..978e1173 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -11,4 +11,3 @@
 #
 * @vrdmr @gavin-aguiar @YunchuWang @pdthummar @hallvictoria
-
diff --git a/azure/functions/decorators/constants.py b/azure/functions/decorators/constants.py
index ce439e64..0948bc58 100644
--- a/azure/functions/decorators/constants.py
+++ b/azure/functions/decorators/constants.py
@@ -28,9 +28,16 @@
 DAPR_SECRET = "daprSecret"
 DAPR_PUBLISH = "daprPublish"
 DAPR_INVOKE = "daprInvoke"
-DAPR_PUBLISH = "daprPublish"
 DAPR_BINDING = "daprBinding"
 ORCHESTRATION_TRIGGER = "orchestrationTrigger"
 ACTIVITY_TRIGGER = "activityTrigger"
 ENTITY_TRIGGER = "entityTrigger"
 DURABLE_CLIENT = "durableClient"
+ASSISTANT_SKILLS_TRIGGER = "assistantSkillsTrigger"
+TEXT_COMPLETION = "textCompletion"
+ASSISTANT_QUERY = "assistantQuery"
+EMBEDDINGS = "embeddings"
+EMBEDDINGS_STORE = "embeddingsStore"
+ASSISTANT_CREATE = "assistantCreate"
+ASSISTANT_POST = "assistantPost"
+SEMANTIC_SEARCH = "semanticSearch"
diff --git a/azure/functions/decorators/function_app.py b/azure/functions/decorators/function_app.py
index 172e2b05..fe17ceb5 100644
--- a/azure/functions/decorators/function_app.py
+++ b/azure/functions/decorators/function_app.py
@@ -35,6 +35,11 @@
     parse_iterable_param_to_enums, StringifyEnumJsonEncoder
 from azure.functions.http import HttpRequest
 from .generic import GenericInputBinding, GenericTrigger, GenericOutputBinding
+from .openai import AssistantSkillTrigger, OpenAIModels, TextCompletionInput, \
+    AssistantCreateOutput, \
+    AssistantQueryInput, AssistantPostInput, InputType, EmbeddingsInput, \
+    semantic_search_system_prompt, \
+    SemanticSearchInput, EmbeddingsStoreOutput
 from .retry_policy import RetryPolicy
 from .function_name import FunctionName
 from .warmup import WarmUpTrigger
@@ -294,7 +299,9 @@ def decorator():
                 self._function_builders.pop()
                 self._function_builders.append(function_builder)
                 return function_builder
+
             return decorator()
+
         return wrap

     def _get_durable_blueprint(self):
@@ -307,9 +314,10 @@ def _get_durable_blueprint(self):
             df_bp = df.Blueprint()
             return df_bp
         except ImportError:
-            error_message = "Attempted to use a Durable Functions decorator, "\
-                "but the `azure-functions-durable` SDK package could not be "\
-                "found. Please install `azure-functions-durable` to use "\
+            error_message = \
+                "Attempted to use a Durable Functions decorator, " \
+                "but the `azure-functions-durable` SDK package could not be " \
+                "found. Please install `azure-functions-durable` to use " \
                 "Durable Functions."
             raise Exception(error_message)

@@ -1435,6 +1443,68 @@ def decorator():

         return wrap

+    def assistant_skill_trigger(self,
+                                arg_name: str,
+                                function_description: str,
+                                function_name: Optional[str] = None,
+                                parameter_description_json: Optional[str] = None,  # NoQA
+                                model: Optional[OpenAIModels] = OpenAIModels.DefaultChatModel,  # NoQA
+                                data_type: Optional[
+                                    Union[DataType, str]] = None,
+                                **kwargs: Any) -> Callable[..., Any]:
+        """
+        Assistants build on top of the chat functionality to provide
+        assistants with custom skills defined as functions. This internally
+        uses the function calling feature of OpenAI's GPT models to select
+        which functions to invoke and when.
+        Ref: https://platform.openai.com/docs/guides/function-calling
+
+        You can define functions that can be triggered by assistants by using
+        the `assistantSkillTrigger` trigger binding. These functions are
+        invoked by the extension when an assistant signals that it would like
+        to invoke a function in response to a user prompt.
+
+        The name of the function, the description provided by the trigger,
+        and the parameter name are all hints that the underlying language
+        model uses to determine when and how to invoke an assistant function.
+
+        :param arg_name: The name of the trigger parameter in the function
+        code.
+        :param function_description: The description of the assistant
+        function, which is provided to the model.
+        :param function_name: The name of the assistant function, which is
+        provided to the LLM.
+        :param parameter_description_json: A JSON description of the function
+        parameter, which is provided to the LLM.
+        If no description is provided, the description will be autogenerated.
+        :param model: The OpenAI chat model to use.
+        :param data_type: Defines how the Functions runtime should treat the
+        parameter value.
+        :param kwargs: Keyword arguments for specifying additional binding
+        fields to include in the binding json.
+
+        :return: Decorator function.
+        """
+        @self._configure_function_builder
+        def wrap(fb):
+            def decorator():
+                fb.add_trigger(
+                    trigger=AssistantSkillTrigger(
+                        name=arg_name,
+                        function_description=function_description,
+                        function_name=function_name,
+                        parameter_description_json=parameter_description_json,
+                        model=model,
+                        data_type=parse_singular_param_to_enum(data_type,
+                                                               DataType),
+                        **kwargs))
+                return fb
+
+            return decorator()
+
+        return wrap
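Editor's note: a minimal usage sketch of the new trigger, not part of this patch. The app instance, skill name, and return value are hypothetical.

import azure.functions as func

app = func.FunctionApp()


@app.function_name("AddTodo")
@app.assistant_skill_trigger(arg_name="taskDescription",
                             function_description="Create a new todo task")
def add_todo(taskDescription: str) -> str:
    # The extension invokes this function when the assistant decides the
    # skill is needed; the return value is handed back to the assistant.
    return f"Created task: {taskDescription}"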

 class BindingApi(DecoratorApi, ABC):
     """Interface to extend for using existing binding decorator functions."""
@@ -2542,8 +2612,6 @@ def dapr_state_output(self,

         :param arg_name: The name of the variable that represents DaprState
         output object in function code.
-        :param arg_name: The name of the variable that represents DaprState
-        input object in function code.
         :param state_store: State store containing the state for keys.
         :param key: The name of the key.
         :param dapr_address: Dapr address, it is optional field, by default
@@ -2597,8 +2665,6 @@ def dapr_invoke_output(self,

         :param arg_name: The name of the variable that represents DaprState
         output object in function code.
-        :param arg_name: The name of the variable that represents DaprState
-        input object in function code.
         :param app_id: The dapr app name to invoke.
         :param method_name: The method name of the app to invoke.
         :param http_verb: The http verb of the app to invoke.
@@ -2653,8 +2719,6 @@ def dapr_publish_output(self,

         :param arg_name: The name of the variable that represents DaprState
         output object in function code.
-        :param arg_name: The name of the variable that represents DaprState
-        input object in function code.
         :param pub_sub_name: The pub/sub name to publish to.
         :param topic: The name of the topic to publish to.
         :param dapr_address: Dapr address, it is optional field, by default
@@ -2708,8 +2772,6 @@ def dapr_binding_output(self,

         :param arg_name: The name of the variable that represents DaprState
         output object in function code.
-        :param arg_name: The name of the variable that represents DaprState
-        input object in function code.
         :param binding_name: The configured name of the binding.
         :param operation: The configured operation.
         :param dapr_address: Dapr address, it is optional field, by default
@@ -2740,6 +2802,377 @@ def decorator():

         return wrap

+    def text_completion_input(self,
+                              arg_name: str,
+                              prompt: str,
+                              model: Optional[OpenAIModels] = OpenAIModels.DefaultChatModel,  # NoQA
+                              temperature: Optional[str] = "0.5",
+                              top_p: Optional[str] = None,
+                              max_tokens: Optional[str] = "100",
+                              data_type: Optional[Union[DataType, str]] = None,
+                              **kwargs) \
+            -> Callable[..., Any]:
+        """
+        The textCompletion input binding can be used to invoke the
+        OpenAI Chat Completions API and return the results to the function.
+
+        Ref: https://platform.openai.com/docs/guides/text-generation/chat-completions-vs-completions  # NoQA
+
+        The examples below define "who is" HTTP-triggered functions with a
+        hardcoded `"who is {name}?"` prompt, where `{name}` is substituted
+        with the value in the HTTP request path. The OpenAI input binding
+        invokes the OpenAI GPT endpoint to surface the answer to the prompt
+        to the function, which then returns the result text as the response
+        content.
+
+        :param arg_name: The name of the binding parameter in the function
+        code.
+        :param prompt: The prompt to generate completions for, encoded as a
+        string.
+        :param model: The ID of the model to use.
+        :param temperature: The sampling temperature to use, between 0 and 2.
+        Higher values like 0.8 will make the output more random, while lower
+        values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called
+        nucleus sampling, where the model considers the results of the tokens
+        with top_p probability mass. So 0.1 means only the tokens comprising
+        the top 10% probability mass are considered. It's generally
+        recommended to use this or temperature, but not both.
+        :param max_tokens: The maximum number of tokens to generate in the
+        completion. The token count of your prompt plus max_tokens cannot
+        exceed the model's context length. Most models have a context length
+        of 2048 tokens (except for the newest models, which support 4096).
+        :param data_type: Defines how the Functions runtime should treat the
+        parameter value.
+        :param kwargs: Keyword arguments for specifying additional binding
+        fields to include in the binding json.
+
+        :return: Decorator function.
+        """
+
+        @self._configure_function_builder
+        def wrap(fb):
+            def decorator():
+                fb.add_binding(
+                    binding=TextCompletionInput(
+                        name=arg_name,
+                        prompt=prompt,
+                        model=model,
+                        temperature=temperature,
+                        top_p=top_p,
+                        max_tokens=max_tokens,
+                        data_type=parse_singular_param_to_enum(data_type,
+                                                               DataType),
+                        **kwargs))
+                return fb
+
+            return decorator()
+
+        return wrap
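Editor's note: a sketch of the "who is" example the docstring above describes; the route and function names are illustrative.

import json

import azure.functions as func

app = func.FunctionApp()


@app.route(route="whois/{name}", methods=["GET"])
@app.text_completion_input(arg_name="response", prompt="Who is {name}?",
                           max_tokens="100")
def whois(req: func.HttpRequest, response: str) -> func.HttpResponse:
    # The binding delivers a JSON string; its "content" field carries the
    # model's completion for the templated prompt.
    response_json = json.loads(response)
    return func.HttpResponse(response_json["content"], status_code=200)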
+
+    def assistant_create_output(self, arg_name: str,
+                                data_type: Optional[
+                                    Union[DataType, str]] = None,
+                                **kwargs) \
+            -> Callable[..., Any]:
+        """
+        The assistantCreate output binding creates a new assistant with a
+        specified system prompt.
+
+        :param arg_name: The name of the binding parameter in the function
+        code.
+        :param data_type: Defines how the Functions runtime should treat the
+        parameter value.
+        :param kwargs: Keyword arguments for specifying additional binding
+        fields to include in the binding json.
+
+        :return: Decorator function.
+        """
+
+        @self._configure_function_builder
+        def wrap(fb):
+            def decorator():
+                fb.add_binding(
+                    binding=AssistantCreateOutput(
+                        name=arg_name,
+                        data_type=parse_singular_param_to_enum(data_type,
+                                                               DataType),
+                        **kwargs))
+                return fb
+
+            return decorator()
+
+        return wrap
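Editor's note: one possible pairing with an HTTP trigger (a sketch; the route, IDs, and instructions are assumptions).

import json

import azure.functions as func

app = func.FunctionApp()


@app.route(route="assistants/{assistantId}", methods=["PUT"])
@app.assistant_create_output(arg_name="requests")
def create_assistant(req: func.HttpRequest,
                     requests: func.Out[str]) -> func.HttpResponse:
    assistant_id = req.route_params.get("assistantId")
    # Writing a JSON request to the output binding creates the assistant.
    requests.set(json.dumps({"id": assistant_id,
                             "instructions": "You are a helpful assistant."}))
    return func.HttpResponse(json.dumps({"assistantId": assistant_id}),
                             status_code=202,
                             mimetype="application/json")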
+ """ + + @self._configure_function_builder + def wrap(fb): + def decorator(): + fb.add_binding( + binding=AssistantCreateOutput( + name=arg_name, + data_type=parse_singular_param_to_enum(data_type, + DataType), + **kwargs)) + return fb + + return decorator() + + return wrap + + def assistant_query_input(self, + arg_name: str, + id: str, + timestamp_utc: str, + data_type: Optional[ + Union[DataType, str]] = None, + **kwargs) \ + -> Callable[..., Any]: + """ + The assistantQuery input binding fetches the assistant history and + passes it to the function. + + :param arg_name: The name of binding parameter in the function code. + :param timestamp_utc: the timestamp of the earliest message in the chat + history to fetch. The timestamp should be in ISO 8601 format - for + example, 2023-08-01T00:00:00Z. + :param id: The ID of the Assistant to query. + :param data_type: Defines how Functions runtime should treat the + parameter value + :param kwargs: Keyword arguments for specifying additional binding + fields to include in the binding json + + :return: Decorator function. + """ + + @self._configure_function_builder + def wrap(fb): + def decorator(): + fb.add_binding( + binding=AssistantQueryInput( + name=arg_name, + id=id, + timestamp_utc=timestamp_utc, + data_type=parse_singular_param_to_enum(data_type, + DataType), + **kwargs)) + return fb + + return decorator() + + return wrap + + def assistant_post_input(self, arg_name: str, + id: str, + user_message: str, + model: Optional[str] = None, + data_type: Optional[ + Union[DataType, str]] = None, + **kwargs) \ + -> Callable[..., Any]: + """ + The assistantPost output binding sends a message to the assistant and + saves the response in its internal state. + + :param arg_name: The name of binding parameter in the function code. + :param id: The ID of the assistant to update. + :param user_message: The user message that user has entered for + assistant to respond to. + :param model: The OpenAI chat model to use. + :param data_type: Defines how Functions runtime should treat the + parameter value + :param kwargs: Keyword arguments for specifying additional binding + fields to include in the binding json + + :return: Decorator function. + """ + + @self._configure_function_builder + def wrap(fb): + def decorator(): + fb.add_binding( + binding=AssistantPostInput( + name=arg_name, + id=id, + user_message=user_message, + model=model, + data_type=parse_singular_param_to_enum(data_type, + DataType), + **kwargs)) + return fb + + return decorator() + + return wrap + + def embeddings_input(self, + arg_name: str, + input: str, + input_type: InputType, + model: Optional[str] = None, + max_chunk_length: Optional[int] = 8 * 1024, + max_overlap: Optional[int] = 128, + data_type: Optional[ + Union[DataType, str]] = None, + **kwargs) \ + -> Callable[..., Any]: + """ + The embeddings input decorator creates embeddings which will be used to + measure the relatedness of text strings. + + Ref: https://platform.openai.com/docs/guides/embeddings + + :param arg_name: The name of binding parameter in the function code. + :param input: The input source containing the data to generate + embeddings for. + :param input_type: The type of the input. + :param model: The ID of the model to use. + :param max_chunk_length: The maximum number of characters to chunk the + input into. Default value: 8 * 1024 + :param max_overlap: The maximum number of characters to overlap + between chunks. 
+
+    def embeddings_input(self,
+                         arg_name: str,
+                         input: str,
+                         input_type: InputType,
+                         model: Optional[str] = None,
+                         max_chunk_length: Optional[int] = 8 * 1024,
+                         max_overlap: Optional[int] = 128,
+                         data_type: Optional[
+                             Union[DataType, str]] = None,
+                         **kwargs) \
+            -> Callable[..., Any]:
+        """
+        The embeddings input decorator creates embeddings which will be used
+        to measure the relatedness of text strings.
+
+        Ref: https://platform.openai.com/docs/guides/embeddings
+
+        :param arg_name: The name of the binding parameter in the function
+        code.
+        :param input: The input source containing the data to generate
+        embeddings for.
+        :param input_type: The type of the input.
+        :param model: The ID of the model to use.
+        :param max_chunk_length: The maximum number of characters to chunk
+        the input into. Default value: 8 * 1024
+        :param max_overlap: The maximum number of characters to overlap
+        between chunks. Default value: 128
+        :param data_type: Defines how the Functions runtime should treat the
+        parameter value.
+        :param kwargs: Keyword arguments for specifying additional binding
+        fields to include in the binding json.
+
+        :return: Decorator function.
+        """
+
+        @self._configure_function_builder
+        def wrap(fb):
+            def decorator():
+                fb.add_binding(
+                    binding=EmbeddingsInput(
+                        name=arg_name,
+                        input=input,
+                        input_type=input_type,
+                        model=model,
+                        max_chunk_length=max_chunk_length,
+                        max_overlap=max_overlap,
+                        data_type=parse_singular_param_to_enum(data_type,
+                                                               DataType),
+                        **kwargs))
+                return fb
+
+            return decorator()
+
+        return wrap
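Editor's note: a sketch of generating embeddings for raw text posted to an HTTP function; the route and request field names are assumptions.

import azure.functions as func
from azure.functions.decorators.openai import InputType

app = func.FunctionApp()


@app.route(route="embeddings", methods=["POST"])
@app.embeddings_input(arg_name="embeddings", input="{rawText}",
                      input_type=InputType.RawText)
def generate_embeddings(req: func.HttpRequest, embeddings: str) -> str:
    # `embeddings` is a JSON payload describing the vectors generated for
    # the chunked input text.
    return embeddings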
+ """ + + @self._configure_function_builder + def wrap(fb): + def decorator(): + fb.add_binding( + binding=SemanticSearchInput( + name=arg_name, + connection_name=connection_name, + collection=collection, + query=query, + embeddings_model=embeddings_model, + chat_model=chat_model, + system_prompt=system_prompt, + max_knowledge_count=max_knowledge_count, + data_type=parse_singular_param_to_enum(data_type, + DataType), + **kwargs)) + return fb + + return decorator() + + return wrap + + def embeddings_store_output(self, + arg_name: str, + input: str, + input_type: InputType, + connection_name: str, + collection: str, + model: Optional[OpenAIModels] = OpenAIModels.DefaultEmbeddingsModel, # NoQA + max_chunk_length: Optional[int] = 8 * 1024, + max_overlap: Optional[int] = 128, + data_type: Optional[ + Union[DataType, str]] = None, + **kwargs) \ + -> Callable[..., Any]: + """ + Supported list of embeddings store is extensible, and more can be + added by authoring a specially crafted NuGet package. Visit the + currently supported vector specific folder for specific usage + information: + + - Azure AI Search + - Azure Data Explorer + - Azure Cosmos DB using MongoDB + + :param arg_name: The name of binding parameter in the function code. + :param input: The input to generate embeddings for. + :param input_type: The type of the input. + :param connection_name: The name of an app setting or environment + variable which contains a connection string value + :param collection: The collection or table to search. + :param model: The ID of the model to use. + :param max_chunk_length: The maximum number of characters to chunk the + input into. + :param max_overlap: The maximum number of characters to overlap between + chunks. + :param data_type: Optional. Defines how Functions runtime should treat + the parameter value. Default value: None + :param kwargs: Keyword arguments for specifying additional binding + fields to include in the binding json + + :return: Decorator function. 
+ """ + + @self._configure_function_builder + def wrap(fb): + def decorator(): + fb.add_binding( + binding=EmbeddingsStoreOutput( + name=arg_name, + input=input, + input_type=input_type, + connection_name=connection_name, + collection=collection, + model=model, + max_chunk_length=max_chunk_length, + max_overlap=max_overlap, + data_type=parse_singular_param_to_enum(data_type, + DataType), + **kwargs)) + return fb + + return decorator() + + return wrap + class SettingsApi(DecoratorApi, ABC): """Interface to extend for using existing settings decorator in diff --git a/azure/functions/decorators/openai.py b/azure/functions/decorators/openai.py new file mode 100644 index 00000000..546a87e4 --- /dev/null +++ b/azure/functions/decorators/openai.py @@ -0,0 +1,216 @@ +from typing import Optional + +from azure.functions.decorators.constants import (ASSISTANT_SKILLS_TRIGGER, + TEXT_COMPLETION, + ASSISTANT_QUERY, + EMBEDDINGS, EMBEDDINGS_STORE, + ASSISTANT_CREATE, + ASSISTANT_POST, + SEMANTIC_SEARCH) +from azure.functions.decorators.core import Trigger, DataType, InputBinding, \ + OutputBinding +from azure.functions.decorators.utils import StringifyEnum + + +class InputType(StringifyEnum): + + RawText = "raw_text", + FilePath = "file_path" + + +class OpenAIModels(StringifyEnum): + DefaultChatModel = "gpt-3.5-turbo" + DefaultEmbeddingsModel = "text-embedding-ada-002" + + +class AssistantSkillTrigger(Trigger): + + @staticmethod + def get_binding_name() -> str: + return ASSISTANT_SKILLS_TRIGGER + + def __init__(self, + name: str, + function_description: str, + function_name: Optional[str] = None, + parameter_description_json: Optional[str] = None, + model: Optional[OpenAIModels] = OpenAIModels.DefaultChatModel, + data_type: Optional[DataType] = None, + **kwargs): + self.function_description = function_description + self.function_name = function_name + self.parameter_description_json = parameter_description_json + self.model = model + super().__init__(name=name, data_type=data_type) + + +class TextCompletionInput(InputBinding): + + @staticmethod + def get_binding_name() -> str: + return TEXT_COMPLETION + + def __init__(self, + name: str, + prompt: str, + model: Optional[OpenAIModels] = OpenAIModels.DefaultChatModel, + temperature: Optional[str] = "0.5", + top_p: Optional[str] = None, + max_tokens: Optional[str] = "100", + data_type: Optional[DataType] = None, + **kwargs): + self.prompt = prompt + self.model = model + self.temperature = temperature + self.top_p = top_p + self.max_tokens = max_tokens + super().__init__(name=name, data_type=data_type) + + +class AssistantQueryInput(InputBinding): + + @staticmethod + def get_binding_name(): + return ASSISTANT_QUERY + + def __init__(self, + name: str, + id: str, + timestamp_utc: str, + data_type: Optional[DataType] = None, + **kwargs): + self.id = id + self.timestamp_utc = timestamp_utc + super().__init__(name=name, data_type=data_type) + + +class EmbeddingsInput(InputBinding): + + @staticmethod + def get_binding_name() -> str: + return EMBEDDINGS + + def __init__(self, + name: str, + input: str, + input_type: InputType, + model: Optional[str] = None, + max_chunk_length: Optional[int] = 8 * 1024, + max_overlap: Optional[int] = 128, + data_type: Optional[DataType] = None, + **kwargs): + self.name = name + self.input = input + self.input_type = input_type + self.model = model + self.max_chunk_length = max_chunk_length + self.max_overlap = max_overlap + super().__init__(name=name, data_type=data_type) + + +semantic_search_system_prompt = \ + """You are a helpful 
+
 class SettingsApi(DecoratorApi, ABC):
     """Interface to extend for using existing settings decorator in
diff --git a/azure/functions/decorators/openai.py b/azure/functions/decorators/openai.py
new file mode 100644
index 00000000..546a87e4
--- /dev/null
+++ b/azure/functions/decorators/openai.py
@@ -0,0 +1,216 @@
+from typing import Optional
+
+from azure.functions.decorators.constants import (ASSISTANT_SKILLS_TRIGGER,
+                                                  TEXT_COMPLETION,
+                                                  ASSISTANT_QUERY,
+                                                  EMBEDDINGS, EMBEDDINGS_STORE,
+                                                  ASSISTANT_CREATE,
+                                                  ASSISTANT_POST,
+                                                  SEMANTIC_SEARCH)
+from azure.functions.decorators.core import Trigger, DataType, InputBinding, \
+    OutputBinding
+from azure.functions.decorators.utils import StringifyEnum
+
+
+class InputType(StringifyEnum):
+
+    RawText = "raw_text"
+    FilePath = "file_path"
+
+
+class OpenAIModels(StringifyEnum):
+    DefaultChatModel = "gpt-3.5-turbo"
+    DefaultEmbeddingsModel = "text-embedding-ada-002"
+
+
+class AssistantSkillTrigger(Trigger):
+
+    @staticmethod
+    def get_binding_name() -> str:
+        return ASSISTANT_SKILLS_TRIGGER
+
+    def __init__(self,
+                 name: str,
+                 function_description: str,
+                 function_name: Optional[str] = None,
+                 parameter_description_json: Optional[str] = None,
+                 model: Optional[OpenAIModels] = OpenAIModels.DefaultChatModel,
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        self.function_description = function_description
+        self.function_name = function_name
+        self.parameter_description_json = parameter_description_json
+        self.model = model
+        super().__init__(name=name, data_type=data_type)
+
+
+class TextCompletionInput(InputBinding):
+
+    @staticmethod
+    def get_binding_name() -> str:
+        return TEXT_COMPLETION
+
+    def __init__(self,
+                 name: str,
+                 prompt: str,
+                 model: Optional[OpenAIModels] = OpenAIModels.DefaultChatModel,
+                 temperature: Optional[str] = "0.5",
+                 top_p: Optional[str] = None,
+                 max_tokens: Optional[str] = "100",
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        self.prompt = prompt
+        self.model = model
+        self.temperature = temperature
+        self.top_p = top_p
+        self.max_tokens = max_tokens
+        super().__init__(name=name, data_type=data_type)
+
+
+class AssistantQueryInput(InputBinding):
+
+    @staticmethod
+    def get_binding_name():
+        return ASSISTANT_QUERY
+
+    def __init__(self,
+                 name: str,
+                 id: str,
+                 timestamp_utc: str,
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        self.id = id
+        self.timestamp_utc = timestamp_utc
+        super().__init__(name=name, data_type=data_type)
+
+
+class EmbeddingsInput(InputBinding):
+
+    @staticmethod
+    def get_binding_name() -> str:
+        return EMBEDDINGS
+
+    def __init__(self,
+                 name: str,
+                 input: str,
+                 input_type: InputType,
+                 model: Optional[str] = None,
+                 max_chunk_length: Optional[int] = 8 * 1024,
+                 max_overlap: Optional[int] = 128,
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        self.name = name
+        self.input = input
+        self.input_type = input_type
+        self.model = model
+        self.max_chunk_length = max_chunk_length
+        self.max_overlap = max_overlap
+        super().__init__(name=name, data_type=data_type)
+
+
+semantic_search_system_prompt = \
+    """You are a helpful assistant. You are responding to requests
+    from a user about internal emails and documents. You can and
+    should refer to the internal documents to help respond to
+    requests. If a user makes a request that's not covered by the
+    internal emails and documents, explain that you don't know the
+    answer or that you don't have access to the information.
+
+    The following is a list of documents that you can refer to when
+    answering questions. The documents are in the format
+    [filename]: [text] and are separated by newlines. If you answer
+    a question by referencing any of the documents, please cite the
+    document in your answer. For example, if you answer a question
+    by referencing info.txt, you should add "Reference: info.txt"
+    to the end of your answer on a separate line."""
+
+
+class SemanticSearchInput(InputBinding):
+
+    @staticmethod
+    def get_binding_name() -> str:
+        return SEMANTIC_SEARCH
+
+    def __init__(self,
+                 name: str,
+                 connection_name: str,
+                 collection: str,
+                 query: Optional[str] = None,
+                 embeddings_model: Optional[
+                     OpenAIModels] = OpenAIModels.DefaultEmbeddingsModel,
+                 chat_model: Optional[
+                     OpenAIModels] = OpenAIModels.DefaultChatModel,
+                 system_prompt: Optional[str] = semantic_search_system_prompt,
+                 max_knowledge_count: Optional[int] = 1,
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        self.name = name
+        self.connection_name = connection_name
+        self.collection = collection
+        self.query = query
+        self.embeddings_model = embeddings_model
+        self.chat_model = chat_model
+        self.system_prompt = system_prompt
+        self.max_knowledge_count = max_knowledge_count
+        super().__init__(name=name, data_type=data_type)
+
+
+class AssistantPostInput(InputBinding):
+
+    @staticmethod
+    def get_binding_name():
+        return ASSISTANT_POST
+
+    def __init__(self, name: str,
+                 id: str,
+                 user_message: str,
+                 model: Optional[str] = None,
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        self.name = name
+        self.id = id
+        self.user_message = user_message
+        self.model = model
+        super().__init__(name=name, data_type=data_type)
+
+
+class EmbeddingsStoreOutput(OutputBinding):
+
+    @staticmethod
+    def get_binding_name() -> str:
+        return EMBEDDINGS_STORE
+
+    def __init__(self,
+                 name: str,
+                 input: str,
+                 input_type: InputType,
+                 connection_name: str,
+                 collection: str,
+                 model: Optional[
+                     OpenAIModels] = OpenAIModels.DefaultEmbeddingsModel,
+                 max_chunk_length: Optional[int] = 8 * 1024,
+                 max_overlap: Optional[int] = 128,
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        self.name = name
+        self.input = input
+        self.input_type = input_type
+        self.connection_name = connection_name
+        self.collection = collection
+        self.model = model
+        self.max_chunk_length = max_chunk_length
+        self.max_overlap = max_overlap
+        super().__init__(name=name, data_type=data_type)
+
+
+class AssistantCreateOutput(OutputBinding):
+
+    @staticmethod
+    def get_binding_name():
+        return ASSISTANT_CREATE
+
+    def __init__(self,
+                 name: str,
+                 data_type: Optional[DataType] = None,
+                 **kwargs):
+        super().__init__(name=name, data_type=data_type)
diff --git a/tests/decorators/test_openai.py b/tests/decorators/test_openai.py
new file mode 100644
index 00000000..9727890f
--- /dev/null
+++ b/tests/decorators/test_openai.py
@@ -0,0 +1,181 @@
+import unittest
+
+from azure.functions import DataType
+from azure.functions.decorators.core import BindingDirection
+from azure.functions.decorators.openai import AssistantSkillTrigger, \
+    TextCompletionInput, OpenAIModels, AssistantQueryInput, EmbeddingsInput, \
+    AssistantCreateOutput, SemanticSearchInput, EmbeddingsStoreOutput, \
+    AssistantPostInput
+
+
+class TestOpenAI(unittest.TestCase):
+
+    def test_assistant_skills_trigger_valid_creation(self):
+        trigger = AssistantSkillTrigger(name="test",
+                                        function_description="description",
+                                        function_name="test_function_name",
+                                        parameter_description_json="test_json",
+                                        model=OpenAIModels.DefaultChatModel,
+                                        data_type=DataType.UNDEFINED,
+                                        dummy_field="dummy")
+        self.assertEqual(trigger.get_binding_name(),
+                         "assistantSkillsTrigger")
+        self.assertEqual(
+            trigger.get_dict_repr(), {"name": "test",
+                                      "functionDescription": "description",
+                                      "functionName": "test_function_name",
+                                      "parameterDescriptionJson": "test_json",
+                                      "model": OpenAIModels.DefaultChatModel,
+                                      "dataType": DataType.UNDEFINED,
+                                      'type': 'assistantSkillsTrigger',
+                                      'dummyField': 'dummy',
+                                      "direction": BindingDirection.IN,
+                                      })
+
+    def test_text_completion_input_valid_creation(self):
+        input = TextCompletionInput(name="test",
+                                    prompt="test_prompt",
+                                    temperature="1",
+                                    max_tokens="1",
+                                    data_type=DataType.UNDEFINED,
+                                    model=OpenAIModels.DefaultChatModel,
+                                    dummy_field="dummy")
+        self.assertEqual(input.get_binding_name(),
+                         "textCompletion")
+        self.assertEqual(input.get_dict_repr(),
+                         {"name": "test",
+                          "temperature": "1",
+                          "maxTokens": "1",
+                          'type': 'textCompletion',
+                          "dataType": DataType.UNDEFINED,
+                          "dummyField": "dummy",
+                          "prompt": "test_prompt",
+                          "direction": BindingDirection.IN,
+                          "model": OpenAIModels.DefaultChatModel
+                          })
+
+    def test_assistant_query_input_valid_creation(self):
+        input = AssistantQueryInput(name="test",
+                                    timestamp_utc="timestamp_utc",
+                                    data_type=DataType.UNDEFINED,
+                                    id="test_id",
+                                    type="assistantQueryInput",
+                                    dummy_field="dummy")
+        self.assertEqual(input.get_binding_name(),
+                         "assistantQuery")
+        self.assertEqual(input.get_dict_repr(),
+                         {"name": "test",
+                          "timestampUtc": "timestamp_utc",
+                          "dataType": DataType.UNDEFINED,
+                          "direction": BindingDirection.IN,
+                          "type": "assistantQuery",
+                          "id": "test_id",
+                          "dummyField": "dummy"
+                          })
+
+    def test_embeddings_input_valid_creation(self):
+        input = EmbeddingsInput(name="test",
+                                data_type=DataType.UNDEFINED,
+                                input="test_input",
+                                input_type="test_input_type",
+                                model="test_model",
+                                max_overlap=1,
+                                max_chunk_length=1,
+                                dummy_field="dummy")
+        self.assertEqual(input.get_binding_name(),
+                         "embeddings")
+        self.assertEqual(input.get_dict_repr(),
+                         {"name": "test",
+                          "type": "embeddings",
+                          "dataType": DataType.UNDEFINED,
+                          "input": "test_input",
+                          "inputType": "test_input_type",
+                          "model": "test_model",
+                          "maxOverlap": 1,
+                          "maxChunkLength": 1,
+                          "direction": BindingDirection.IN,
+                          "dummyField": "dummy"})
+
+    def test_assistant_create_output_valid_creation(self):
+        output = AssistantCreateOutput(name="test",
+                                       data_type=DataType.UNDEFINED)
+        self.assertEqual(output.get_binding_name(),
+                         "assistantCreate")
+        self.assertEqual(output.get_dict_repr(),
+                         {"name": "test",
+                          "dataType": DataType.UNDEFINED,
+                          "direction": BindingDirection.OUT,
+                          "type": "assistantCreate"})
+
+    def test_assistant_post_input_valid_creation(self):
+        input = AssistantPostInput(name="test",
+                                   id="test_id",
+                                   model="test_model",
+                                   user_message="test_message",
+                                   data_type=DataType.UNDEFINED,
+                                   dummy_field="dummy")
+        self.assertEqual(input.get_binding_name(),
+                         "assistantPost")
+        self.assertEqual(input.get_dict_repr(),
+                         {"name": "test",
+                          "id": "test_id",
+                          "model": "test_model",
+                          "userMessage": "test_message",
+                          "dataType": DataType.UNDEFINED,
+                          "direction": BindingDirection.IN,
+                          "dummyField": "dummy",
+                          "type": "assistantPost"})
"assistantPost"}) + + def test_semantic_search_input_valid_creation(self): + input = SemanticSearchInput(name="test", + data_type=DataType.UNDEFINED, + chat_model=OpenAIModels.DefaultChatModel, + embeddings_model=OpenAIModels.DefaultEmbeddingsModel, # NoQA + collection="test_collection", + connection_name="test_connection", + system_prompt="test_prompt", + query="test_query", + max_knowledge_count=1, + dummy_field="dummy_field") + self.assertEqual(input.get_binding_name(), + "semanticSearch") + self.assertEqual(input.get_dict_repr(), + {"name": "test", + "dataType": DataType.UNDEFINED, + "direction": BindingDirection.IN, + "dummyField": "dummy_field", + "chatModel": OpenAIModels.DefaultChatModel, + "embeddingsModel": OpenAIModels.DefaultEmbeddingsModel, # NoQA + "type": "semanticSearch", + "collection": "test_collection", + "connectionName": "test_connection", + "systemPrompt": "test_prompt", + "maxKnowledgeCount": 1, + "query": "test_query"}) + + def test_embeddings_store_output_valid_creation(self): + output = EmbeddingsStoreOutput(name="test", + data_type=DataType.UNDEFINED, + input="test_input", + input_type="test_input_type", + connection_name="test_connection", + max_overlap=1, + max_chunk_length=1, + collection="test_collection", + model=OpenAIModels.DefaultChatModel, + dummy_field="dummy_field") + self.assertEqual(output.get_binding_name(), + "embeddingsStore") + self.assertEqual(output.get_dict_repr(), + {"name": "test", + "dataType": DataType.UNDEFINED, + "direction": BindingDirection.OUT, + "dummyField": "dummy_field", + "input": "test_input", + "inputType": "test_input_type", + "collection": "test_collection", + "model": OpenAIModels.DefaultChatModel, + "connectionName": "test_connection", + "maxOverlap": 1, + "maxChunkLength": 1, + "type": "embeddingsStore"})