diff --git a/.mock/definition/api.yml b/.mock/definition/api.yml deleted file mode 100644 index 4ae27d7b..00000000 --- a/.mock/definition/api.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: api -error-discrimination: - strategy: status-code -default-environment: prod -default-url: Base -environments: - prod: - urls: - Base: https://api.hume.ai/ - evi: wss://api.hume.ai/v0/evi - tts: wss://api.hume.ai/v0/tts - stream: wss://api.hume.ai/v0/stream -auth: HeaderAuthScheme -auth-schemes: - HeaderAuthScheme: - header: X-Hume-Api-Key - type: optional - name: apiKey diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml deleted file mode 100644 index e8d66b47..00000000 --- a/.mock/definition/empathic-voice/__package__.yml +++ /dev/null @@ -1,3567 +0,0 @@ -errors: - UnprocessableEntityError: - status-code: 422 - type: HTTPValidationError - docs: Validation Error - examples: - - value: {} - BadRequestError: - status-code: 400 - type: ErrorResponse - docs: Bad Request - examples: - - value: {} -types: - AssistantEnd: - docs: >- - **Indicates the conclusion of the assistant's response**, signaling that - the assistant has finished speaking for the current conversational turn. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - type: - type: literal<"assistant_end"> - docs: >- - The type of message sent through the socket; for an Assistant End - message, this must be `assistant_end`. - - - This message indicates the conclusion of the assistant's response, - signaling that the assistant has finished speaking for the current - conversational turn. 
- source: - openapi: evi-asyncapi.json - AssistantInput: - docs: >- - **Assistant text to synthesize into spoken audio and insert into the - conversation.** EVI uses this text to generate spoken audio using our - proprietary expressive text-to-speech model. - - - Our model adds appropriate emotional inflections and tones to the text - based on the user's expressions and the context of the conversation. The - synthesized audio is streamed back to the user as an Assistant Message. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - Assistant text to synthesize into spoken audio and insert into the - conversation. - - - EVI uses this text to generate spoken audio using our proprietary - expressive text-to-speech model. Our model adds appropriate emotional - inflections and tones to the text based on the user's expressions and - the context of the conversation. The synthesized audio is streamed - back to the user as an [Assistant - Message](/reference/speech-to-speech-evi/chat#receive.AssistantMessage). - type: - type: literal<"assistant_input"> - docs: >- - The type of message sent through the socket; must be `assistant_input` - for our server to correctly identify and process it as an Assistant - Input message. - source: - openapi: evi-openapi.json - AssistantMessage: - docs: >- - **Transcript of the assistant's message.** Contains the message role, - content, and optionally tool call information including the tool name, - parameters, response requirement status, tool call ID, and tool type. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. 
- from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from an [Assistant Input - message](/reference/speech-to-speech-evi/chat#send.AssistantInput.text). - id: - type: optional - docs: >- - ID of the assistant message. Allows the Assistant Message to be - tracked and referenced. - is_quick_response: - type: boolean - docs: Indicates if this message is a quick response or not. - language: - type: optional - docs: Detected language of the message text. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. - type: - type: literal<"assistant_message"> - docs: >- - The type of message sent through the socket; for an Assistant Message, - this must be `assistant_message`. - - - This message contains both a transcript of the assistant's response - and the expression measurement predictions of the assistant's audio - output. - source: - openapi: evi-asyncapi.json - AssistantProsody: - docs: >- - **Expression measurement predictions of the assistant's audio output.** - Contains inference model results including prosody scores for 48 emotions - within the detected expression of the assistant's audio sample. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - id: - type: optional - docs: Unique identifier for the segment. - models: - type: Inference - docs: Inference model results. - type: - type: literal<"assistant_prosody"> - docs: >- - The type of message sent through the socket; for an Assistant Prosody - message, this must be `assistant_PROSODY`. - - - This message the expression measurement predictions of the assistant's - audio output. - source: - openapi: evi-asyncapi.json - AudioConfiguration: - properties: - channels: - type: integer - docs: Number of audio channels. 
- codec: - type: optional - docs: Optional codec information. - encoding: - type: Encoding - docs: Encoding format of the audio input, such as `linear16`. - sample_rate: - type: integer - docs: >- - Audio sample rate. Number of samples per second in the audio input, - measured in Hertz. - source: - openapi: evi-openapi.json - AudioInput: - docs: >- - **Base64 encoded audio input to insert into the conversation.** The - content is treated as the user's speech to EVI and must be streamed - continuously. Pre-recorded audio files are not supported. - - - For optimal transcription quality, the audio data should be transmitted in - small chunks. Hume recommends streaming audio with a buffer window of `20` - milliseconds (ms), or `100` milliseconds (ms) for web applications. See - our [Audio Guide](/docs/speech-to-speech-evi/guides/audio) for more - details on preparing and processing audio. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - data: - type: string - docs: >- - Base64 encoded audio input to insert into the conversation. - - - The content of an Audio Input message is treated as the user's speech - to EVI and must be streamed continuously. Pre-recorded audio files are - not supported. - - - For optimal transcription quality, the audio data should be - transmitted in small chunks. - - - Hume recommends streaming audio with a buffer window of 20 - milliseconds (ms), or 100 milliseconds (ms) for web applications. - type: - type: literal<"audio_input"> - docs: >- - The type of message sent through the socket; must be `audio_input` for - our server to correctly identify and process it as an Audio Input - message. 
- source: - openapi: evi-openapi.json - AudioOutput: - docs: >- - **Base64 encoded audio output.** This encoded audio is transmitted to the - client, where it can be decoded and played back as part of the user - interaction. The returned audio format is WAV and the sample rate is - 48kHz. - - - Contains the audio data, an ID to track and reference the audio output, - and an index indicating the chunk position relative to the whole audio - segment. See our [Audio Guide](/docs/speech-to-speech-evi/guides/audio) - for more details on preparing and processing audio. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - data: - type: string - docs: >- - Base64 encoded audio output. This encoded audio is transmitted to the - client, where it can be decoded and played back as part of the user - interaction. - id: - type: string - docs: >- - ID of the audio output. Allows the Audio Output message to be tracked - and referenced. - index: - type: integer - docs: Index of the chunk of audio relative to the whole audio segment. - type: - type: literal<"audio_output"> - docs: >- - The type of message sent through the socket; for an Audio Output - message, this must be `audio_output`. - source: - openapi: evi-asyncapi.json - BuiltInTool: - enum: - - web_search - - hang_up - source: - openapi: evi-openapi.json - BuiltinToolConfig: - properties: - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - name: - type: BuiltInTool - source: - openapi: evi-openapi.json - ChatMessageToolResult: - discriminated: false - docs: Function call response from client. 
- union: - - type: ToolResponseMessage - - type: ToolErrorMessage - source: - openapi: evi-asyncapi.json - inline: true - ChatMessage: - properties: - content: - type: optional - docs: Transcript of the message. - role: - type: Role - docs: Role of who is providing the message. - tool_call: - type: optional - docs: Function call name and arguments. - tool_result: - type: optional - docs: Function call response from client. - source: - openapi: evi-asyncapi.json - ChatMetadata: - docs: >- - **The first message received after establishing a connection with EVI**, - containing important identifiers for the current Chat session. - - - Includes the Chat ID (which allows the Chat session to be tracked and - referenced) and the Chat Group ID (used to resume a Chat when passed in - the `resumed_chat_group_id` query parameter of a subsequent connection - request, allowing EVI to continue the conversation from where it left off - within the Chat Group). - properties: - chat_group_id: - type: string - docs: >- - ID of the Chat Group. - - - Used to resume a Chat when passed in the - [resumed_chat_group_id](/reference/speech-to-speech-evi/chat#request.query.resumed_chat_group_id) - query parameter of a subsequent connection request. This allows EVI to - continue the conversation from where it left off within the Chat - Group. - - - Learn more about [supporting chat - resumability](/docs/speech-to-speech-evi/faq#does-evi-support-chat-resumability) - from the EVI FAQ. - chat_id: - type: string - docs: >- - ID of the Chat session. Allows the Chat session to be tracked and - referenced. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - request_id: - type: optional - docs: ID of the initiating request. - type: - type: literal<"chat_metadata"> - docs: >- - The type of message sent through the socket; for a Chat Metadata - message, this must be `chat_metadata`. 
- - - The Chat Metadata message is the first message you receive after - establishing a connection with EVI and contains important identifiers - for the current Chat session. - source: - openapi: evi-asyncapi.json - Context: - properties: - text: - type: string - docs: >- - The context to be injected into the conversation. Helps inform the - LLM's response by providing relevant information about the ongoing - conversation. - - - This text will be appended to the end of - [user_messages](/reference/speech-to-speech-evi/chat#receive.UserMessage.message.content) - based on the chosen persistence level. For example, if you want to - remind EVI of its role as a helpful weather assistant, the context you - insert will be appended to the end of user messages as `{Context: You - are a helpful weather assistant}`. - type: - type: optional - docs: >- - The persistence level of the injected context. Specifies how long the - injected context will remain active in the session. - - - - **Temporary**: Context that is only applied to the following - assistant response. - - - - **Persistent**: Context that is applied to all subsequent assistant - responses for the remainder of the Chat. 
- source: - openapi: evi-openapi.json - ContextType: - enum: - - persistent - - temporary - source: - openapi: evi-openapi.json - EmotionScores: - properties: - Admiration: double - Adoration: double - Aesthetic Appreciation: double - Amusement: double - Anger: double - Anxiety: double - Awe: double - Awkwardness: double - Boredom: double - Calmness: double - Concentration: double - Confusion: double - Contemplation: double - Contempt: double - Contentment: double - Craving: double - Desire: double - Determination: double - Disappointment: double - Disgust: double - Distress: double - Doubt: double - Ecstasy: double - Embarrassment: double - Empathic Pain: double - Entrancement: double - Envy: double - Excitement: double - Fear: double - Guilt: double - Horror: double - Interest: double - Joy: double - Love: double - Nostalgia: double - Pain: double - Pride: double - Realization: double - Relief: double - Romance: double - Sadness: double - Satisfaction: double - Shame: double - Surprise (negative): double - Surprise (positive): double - Sympathy: double - Tiredness: double - Triumph: double - source: - openapi: evi-openapi.json - Encoding: - type: literal<"linear16"> - WebSocketError: - docs: >- - **Indicates a disruption in the WebSocket connection**, such as an - unexpected disconnection, protocol error, or data transmission issue. - - - Contains an error code identifying the type of error encountered, a - detailed description of the error, and a short, human-readable identifier - and description (slug) for the error. - properties: - code: - type: string - docs: Error code. Identifies the type of error encountered. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - message: - type: string - docs: Detailed description of the error. - request_id: - type: optional - docs: ID of the initiating request. 
- slug: - type: string - docs: >- - Short, human-readable identifier and description for the error. See a - complete list of error slugs on the [Errors - page](/docs/resources/errors). - type: - type: literal<"error"> - docs: >- - The type of message sent through the socket; for a Web Socket Error - message, this must be `error`. - - - This message indicates a disruption in the WebSocket connection, such - as an unexpected disconnection, protocol error, or data transmission - issue. - source: - openapi: evi-asyncapi.json - ErrorLevel: - type: literal<"warn"> - Inference: - properties: - prosody: - type: optional - docs: >- - Prosody model inference results. - - - EVI uses the prosody model to measure 48 emotions related to speech - and vocal characteristics within a given expression. - source: - openapi: evi-openapi.json - MillisecondInterval: - properties: - begin: - type: integer - docs: Start time of the interval in milliseconds. - end: - type: integer - docs: End time of the interval in milliseconds. - source: - openapi: evi-openapi.json - PauseAssistantMessage: - docs: >- - **Pause responses from EVI.** Chat history is still saved and sent after - resuming. Once this message is sent, EVI will not respond until a Resume - Assistant message is sent. - - - When paused, EVI won't respond, but transcriptions of your audio inputs - will still be recorded. See our [Pause Response - Guide](/docs/speech-to-speech-evi/features/pause-responses) for further - details. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - type: - type: literal<"pause_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `pause_assistant_message` for our server to correctly identify and - process it as a Pause Assistant message. 
- - - Once this message is sent, EVI will not respond until a [Resume - Assistant - message](/reference/speech-to-speech-evi/chat#send.ResumeAssistantMessage) - is sent. When paused, EVI won't respond, but transcriptions of your - audio inputs will still be recorded. - source: - openapi: evi-openapi.json - ProsodyInference: - properties: - scores: - type: EmotionScores - docs: >- - The confidence scores for 48 emotions within the detected expression - of an audio sample. - - - Scores typically range from 0 to 1, with higher values indicating a - stronger confidence level in the measured attribute. - - - See our guide on [interpreting expression measurement - results](/docs/expression-measurement/faq#how-do-i-interpret-my-results) - to learn more. - source: - openapi: evi-openapi.json - ResumeAssistantMessage: - docs: >- - **Resume responses from EVI.** Chat history sent while paused will now be - sent. - - - Upon resuming, if any audio input was sent during the pause, EVI will - retain context from all messages sent but only respond to the last user - message. See our [Pause Response - Guide](/docs/speech-to-speech-evi/features/pause-responses) for further - details. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - type: - type: literal<"resume_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `resume_assistant_message` for our server to correctly identify and - process it as a Resume Assistant message. - - - Upon resuming, if any audio input was sent during the pause, EVI will - retain context from all messages sent but only respond to the last - user message. (e.g., If you ask EVI two questions while paused and - then send a `resume_assistant_message`, EVI will respond to the second - question and have added the first question to its conversation - context.) 
- source: - openapi: evi-openapi.json - Role: - enum: - - assistant - - system - - user - - all - - tool - - context - source: - openapi: evi-openapi.json - SessionSettingsVariablesValue: - discriminated: false - union: - - string - - double - - boolean - source: - openapi: evi-openapi.json - inline: true - SessionSettings: - docs: >- - **Settings for this chat session.** Session settings are temporary and - apply only to the current Chat session. - - - These settings can be adjusted dynamically based on the requirements of - each session to ensure optimal performance and user experience. See our - [Session Settings - Guide](/docs/speech-to-speech-evi/configuration/session-settings) for a - complete list of configurable settings. - properties: - audio: - type: optional - docs: >- - Configuration details for the audio input used during the session. - Ensures the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded - in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For - detailed instructions on how to configure session settings for PCM - Linear 16 audio, please refer to the [Session Settings - guide](/docs/speech-to-speech-evi/configuration/session-settings). - builtin_tools: - type: optional> - docs: >- - List of built-in tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use). - - - Currently, the only built-in tool Hume provides is **Web Search**. - When enabled, Web Search equips EVI with the ability to search the web - for up-to-date information. 
- context: - type: optional - docs: >- - Field for injecting additional context into the conversation, which is - appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can - be used to remind the LLM of its role in every user message, prevent - it from forgetting important details, or add new relevant information - to the conversation. - - - Set to `null` to clear injected context. - custom_session_id: - type: optional - docs: >- - Unique identifier for the session. Used to manage conversational - state, correlate frontend and backend data, and persist conversations - across EVI sessions. - - - If included, the response sent from Hume to your backend will include - this ID. This allows you to correlate frontend users with their - incoming messages. - - - It is recommended to pass a `custom_session_id` if you are using a - Custom Language Model. Please see our guide to [using a custom - language - model](/docs/speech-to-speech-evi/guides/custom-language-model) with - EVI to learn more. - language_model_api_key: - type: optional - docs: >- - Third party API key for the supplemental language model. - - - When provided, EVI will use this key instead of Hume's API key for the - supplemental LLM. This allows you to bypass rate limits and utilize - your own API key as needed. - metadata: optional> - system_prompt: - type: optional - docs: >- - Instructions used to shape EVI's behavior, responses, and style for - the session. - - - When included in a Session Settings message, the provided Prompt - overrides the existing one specified in the EVI configuration. If no - Prompt was defined in the configuration, this Prompt will be the one - used for the session. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. 
For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). - tools: - type: optional> - docs: >- - List of user-defined tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use). - type: - type: literal<"session_settings"> - docs: >- - The type of message sent through the socket; must be - `session_settings` for our server to correctly identify and process it - as a Session Settings message. - - - Session settings are temporary and apply only to the current Chat - session. These settings can be adjusted dynamically based on the - requirements of each session to ensure optimal performance and user - experience. - - - For more information, please refer to the [Session Settings - guide](/docs/speech-to-speech-evi/configuration/session-settings). - variables: - type: optional> - docs: >- - This field allows you to assign values to dynamic variables referenced - in your system prompt. - - - Each key represents the variable name, and the corresponding value is - the specific content you wish to assign to that variable within the - session. While the values for variables can be strings, numbers, or - booleans, the value will ultimately be converted to a string when - injected into your system prompt. - - - Using this field, you can personalize responses based on - session-specific details. For more guidance, see our [guide on using - dynamic - variables](/docs/speech-to-speech-evi/features/dynamic-variables). 
- voice_id: - type: optional - docs: >- - Allows you to change the voice during an active chat. Updating the - voice does not affect chat context or conversation history. - source: - openapi: evi-openapi.json - Tool: - properties: - description: - type: optional - docs: >- - An optional description of what the tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - name: - type: string - docs: Name of the user-defined tool to be enabled. - parameters: - type: string - docs: >- - Parameters of the tool. Is a stringified JSON schema. - - - These parameters define the inputs needed for the tool's execution, - including the expected data type and description for each input field. - Structured as a JSON schema, this format ensures the tool receives - data in the expected format. - type: - type: ToolType - docs: Type of tool. Set to `function` for user-defined tools. - source: - openapi: evi-openapi.json - ToolCallMessage: - docs: When provided, the output is a tool call. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - name: - type: string - docs: Name of the tool called. - parameters: - type: string - docs: Parameters of the tool call. Is a stringified JSON schema. - response_required: - type: boolean - docs: >- - Indicates whether a response to the tool call is required from the - developer, either in the form of a [Tool Response - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Response%20Message.type) - or a [Tool Error - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Error%20Message.type). 
- tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. - tool_type: - type: ToolType - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - type: - type: optional> - docs: >- - The type of message sent through the socket; for a Tool Call message, - this must be `tool_call`. - - - This message indicates that the supplemental LLM has detected a need - to invoke the specified tool. - source: - openapi: evi-openapi.json - ToolErrorMessage: - docs: >- - **Error message from the tool call**, not exposed to the LLM or user. Upon - receiving a Tool Call message and failing to invoke the function, this - message is sent to notify EVI of the tool's failure. - - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage` if the tool fails. See our - [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for further - details. - properties: - code: - type: optional - docs: Error code. Identifies the type of error encountered. - content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the tool errors. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - error: - type: string - docs: Error message from the tool call, not exposed to the LLM or user. - level: - type: optional - docs: >- - Indicates the severity of an error; for a Tool Error message, this - must be `warn` to signal an unexpected event. 
- tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the Tool Error message is linked to the - appropriate tool call request. The specified `tool_call_id` must match - the one received in the [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage). - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - type: - type: literal<"tool_error"> - docs: >- - The type of message sent through the socket; for a Tool Error message, - this must be `tool_error`. - - - Upon receiving a [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) - and failing to invoke the function, this message is sent to notify EVI - of the tool's failure. - source: - openapi: evi-openapi.json - ToolResponseMessage: - docs: >- - **Return value of the tool call.** Contains the output generated by the - tool to pass back to EVI. Upon receiving a Tool Call message and - successfully invoking the function, this message is sent to convey the - result of the function call back to EVI. - - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage`. See our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use) for further details. - properties: - content: - type: string - docs: >- - Return value of the tool call. Contains the output generated by the - tool to pass back to EVI. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. 
- - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. The specified `tool_call_id` must match the one - received in the [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage.tool_call_id). - tool_name: - type: optional - docs: >- - Name of the tool. - - - Include this optional field to help the supplemental LLM identify - which tool generated the response. The specified `tool_name` must - match the one received in the [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage). - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - type: - type: literal<"tool_response"> - docs: >- - The type of message sent through the socket; for a Tool Response - message, this must be `tool_response`. - - - Upon receiving a [Tool Call - message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) - and successfully invoking the function, this message is sent to convey - the result of the function call back to EVI. - source: - openapi: evi-openapi.json - ToolType: - enum: - - builtin - - function - source: - openapi: evi-openapi.json - UserInput: - docs: >- - **User text to insert into the conversation.** Text sent through a User - Input message is treated as the user's speech to EVI. EVI processes this - input and provides a corresponding response. - - - Expression measurement results are not available for User Input messages, - as the prosody model relies on audio input and cannot process text alone. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - User text to insert into the conversation. 
Text sent through a User - Input message is treated as the user's speech to EVI. EVI processes - this input and provides a corresponding response. - type: - type: literal<"user_input"> - docs: >- - The type of message sent through the socket; must be `user_input` for - our server to correctly identify and process it as a User Input - message. - source: - openapi: evi-openapi.json - UserInterruption: - docs: >- - **Indicates the user has interrupted the assistant's response.** EVI - detects the interruption in real-time and sends this message to signal the - interruption event. - - - This message allows the system to stop the current audio playback, clear - the audio queue, and prepare to handle new user input. Contains a Unix - timestamp of when the user interruption was detected. For more details, - see our [Interruptibility - Guide](/docs/speech-to-speech-evi/features/interruptibility) - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - time: - type: integer - docs: Unix timestamp of the detected user interruption. - type: - type: literal<"user_interruption"> - docs: >- - The type of message sent through the socket; for a User Interruption - message, this must be `user_interruption`. - - - This message indicates the user has interrupted the assistant's - response. EVI detects the interruption in real-time and sends this - message to signal the interruption event. This message allows the - system to stop the current audio playback, clear the audio queue, and - prepare to handle new user input. - source: - openapi: evi-asyncapi.json - UserMessage: - docs: >- - **Transcript of the user's message.** Contains the message role and - content, along with a `from_text` field indicating if this message was - inserted into the conversation as text from a `UserInput` message. 
- - - Includes an `interim` field indicating whether the transcript is - provisional (words may be repeated or refined in subsequent `UserMessage` - responses as additional audio is processed) or final and complete. Interim - transcripts are only sent when the `verbose_transcription` query parameter - is set to true in the initial handshake. - properties: - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from a [User - Input](/reference/speech-to-speech-evi/chat#send.UserInput.text) - message. - interim: - type: boolean - docs: >- - Indicates whether this `UserMessage` contains an interim (unfinalized) - transcript. - - - - `true`: the transcript is provisional; words may be repeated or - refined in subsequent `UserMessage` responses as additional audio is - processed. - - - `false`: the transcript is final and complete. - - - Interim transcripts are only sent when the - [`verbose_transcription`](/reference/speech-to-speech-evi/chat#request.query.verbose_transcription) - query parameter is set to `true` in the initial handshake. - language: - type: optional - docs: Detected language of the message text. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. - time: - type: MillisecondInterval - docs: Start and End time of user message. - type: - type: literal<"user_message"> - docs: >- - The type of message sent through the socket; for a User Message, this - must be `user_message`. - - - This message contains both a transcript of the user's input and the - expression measurement predictions if the input was sent as an [Audio - Input message](/reference/speech-to-speech-evi/chat#send.AudioInput). 
- Expression measurement predictions are not provided for a [User Input - message](/reference/speech-to-speech-evi/chat#send.UserInput), as the - prosody model relies on audio input and cannot process text alone. - source: - openapi: evi-asyncapi.json - SubscribeEvent: - discriminated: false - union: - - type: AssistantEnd - docs: >- - **Indicates the conclusion of the assistant's response**, signaling - that the assistant has finished speaking for the current - conversational turn. - - type: AssistantMessage - docs: >- - **Transcript of the assistant's message.** Contains the message role, - content, and optionally tool call information including the tool name, - parameters, response requirement status, tool call ID, and tool type. - - type: AssistantProsody - docs: >- - **Expression measurement predictions of the assistant's audio - output.** Contains inference model results including prosody scores - for 48 emotions within the detected expression of the assistant's - audio sample. - - type: AudioOutput - docs: >- - **Base64 encoded audio output.** This encoded audio is transmitted to - the client, where it can be decoded and played back as part of the - user interaction. The returned audio format is WAV and the sample rate - is 48kHz. - - - Contains the audio data, an ID to track and reference the audio - output, and an index indicating the chunk position relative to the - whole audio segment. See our [Audio - Guide](/docs/speech-to-speech-evi/guides/audio) for more details on - preparing and processing audio. - - type: ChatMetadata - docs: >- - **The first message received after establishing a connection with - EVI**, containing important identifiers for the current Chat session. 
- - - Includes the Chat ID (which allows the Chat session to be tracked and - referenced) and the Chat Group ID (used to resume a Chat when passed - in the `resumed_chat_group_id` query parameter of a subsequent - connection request, allowing EVI to continue the conversation from - where it left off within the Chat Group). - - type: WebSocketError - docs: >- - **Indicates a disruption in the WebSocket connection**, such as an - unexpected disconnection, protocol error, or data transmission issue. - - - Contains an error code identifying the type of error encountered, a - detailed description of the error, and a short, human-readable - identifier and description (slug) for the error. - - type: UserInterruption - docs: >- - **Indicates the user has interrupted the assistant's response.** EVI - detects the interruption in real-time and sends this message to signal - the interruption event. - - - This message allows the system to stop the current audio playback, - clear the audio queue, and prepare to handle new user input. Contains - a Unix timestamp of when the user interruption was detected. For more - details, see our [Interruptibility - Guide](/docs/speech-to-speech-evi/features/interruptibility) - - type: UserMessage - docs: >- - **Transcript of the user's message.** Contains the message role and - content, along with a `from_text` field indicating if this message was - inserted into the conversation as text from a `UserInput` message. - - - Includes an `interim` field indicating whether the transcript is - provisional (words may be repeated or refined in subsequent - `UserMessage` responses as additional audio is processed) or final and - complete. Interim transcripts are only sent when the - `verbose_transcription` query parameter is set to true in the initial - handshake. 
- - type: ToolCallMessage - docs: >- - **Indicates that the supplemental LLM has detected a need to invoke - the specified tool.** This message is only received for user-defined - function tools. - - - Contains the tool name, parameters (as a stringified JSON schema), - whether a response is required from the developer (either in the form - of a `ToolResponseMessage` or a `ToolErrorMessage`), the unique tool - call ID for tracking the request and response, and the tool type. See - our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for - further details. - - type: ToolResponseMessage - docs: >- - **Return value of the tool call.** Contains the output generated by - the tool to pass back to EVI. Upon receiving a Tool Call message and - successfully invoking the function, this message is sent to convey the - result of the function call back to EVI. - - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage`. See our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use) for further - details. - - type: ToolErrorMessage - docs: >- - **Error message from the tool call**, not exposed to the LLM or user. - Upon receiving a Tool Call message and failing to invoke the function, - this message is sent to notify EVI of the tool's failure. - - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage` if the tool fails. See - our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for - further details. - - type: SessionSettings - docs: >- - **Settings for this chat session.** Session settings are temporary and - apply only to the current Chat session. - - - These settings can be adjusted dynamically based on the requirements - of each session to ensure optimal performance and user experience. 
See - our [Session Settings - Guide](/docs/speech-to-speech-evi/configuration/session-settings) for - a complete list of configurable settings. - source: - openapi: evi-asyncapi.json - JsonMessage: - discriminated: false - union: - - type: AssistantEnd - docs: >- - **Indicates the conclusion of the assistant's response**, signaling - that the assistant has finished speaking for the current - conversational turn. - - type: AssistantMessage - docs: >- - **Transcript of the assistant's message.** Contains the message role, - content, and optionally tool call information including the tool name, - parameters, response requirement status, tool call ID, and tool type. - - type: AssistantProsody - docs: >- - **Expression measurement predictions of the assistant's audio - output.** Contains inference model results including prosody scores - for 48 emotions within the detected expression of the assistant's - audio sample. - - type: ChatMetadata - docs: >- - **The first message received after establishing a connection with - EVI**, containing important identifiers for the current Chat session. - - - Includes the Chat ID (which allows the Chat session to be tracked and - referenced) and the Chat Group ID (used to resume a Chat when passed - in the `resumed_chat_group_id` query parameter of a subsequent - connection request, allowing EVI to continue the conversation from - where it left off within the Chat Group). - - type: WebSocketError - docs: >- - **Indicates a disruption in the WebSocket connection**, such as an - unexpected disconnection, protocol error, or data transmission issue. - - - Contains an error code identifying the type of error encountered, a - detailed description of the error, and a short, human-readable - identifier and description (slug) for the error. 
- - type: UserInterruption - docs: >- - **Indicates the user has interrupted the assistant's response.** EVI - detects the interruption in real-time and sends this message to signal - the interruption event. - - - This message allows the system to stop the current audio playback, - clear the audio queue, and prepare to handle new user input. Contains - a Unix timestamp of when the user interruption was detected. For more - details, see our [Interruptibility - Guide](/docs/speech-to-speech-evi/features/interruptibility) - - type: UserMessage - docs: >- - **Transcript of the user's message.** Contains the message role and - content, along with a `from_text` field indicating if this message was - inserted into the conversation as text from a `UserInput` message. - - - Includes an `interim` field indicating whether the transcript is - provisional (words may be repeated or refined in subsequent - `UserMessage` responses as additional audio is processed) or final and - complete. Interim transcripts are only sent when the - `verbose_transcription` query parameter is set to true in the initial - handshake. - - type: ToolCallMessage - docs: >- - **Indicates that the supplemental LLM has detected a need to invoke - the specified tool.** This message is only received for user-defined - function tools. - - - Contains the tool name, parameters (as a stringified JSON schema), - whether a response is required from the developer (either in the form - of a `ToolResponseMessage` or a `ToolErrorMessage`), the unique tool - call ID for tracking the request and response, and the tool type. See - our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for - further details. - - type: ToolResponseMessage - docs: >- - **Return value of the tool call.** Contains the output generated by - the tool to pass back to EVI. Upon receiving a Tool Call message and - successfully invoking the function, this message is sent to convey the - result of the function call back to EVI. 
- - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage`. See our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use) for further - details. - - type: ToolErrorMessage - docs: >- - **Error message from the tool call**, not exposed to the LLM or user. - Upon receiving a Tool Call message and failing to invoke the function, - this message is sent to notify EVI of the tool's failure. - - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage` if the tool fails. See - our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for - further details. - - type: SessionSettings - docs: >- - **Settings for this chat session.** Session settings are temporary and - apply only to the current Chat session. - - - These settings can be adjusted dynamically based on the requirements - of each session to ensure optimal performance and user experience. See - our [Session Settings - Guide](/docs/speech-to-speech-evi/configuration/session-settings) for - a complete list of configurable settings. - source: - openapi: evi-asyncapi.json - ConnectSessionSettingsAudio: - docs: >- - Configuration details for the audio input used during the session. Ensures - the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded in - PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For detailed - instructions on how to configure session settings for PCM Linear 16 audio, - please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - properties: - channels: - type: optional - docs: Sets number of audio channels for audio input. - encoding: - type: optional - docs: Sets encoding format of the audio input, such as `linear16`. 
- sample_rate: - type: optional - docs: >- - Sets the sample rate for audio input. (Number of samples per second in - the audio input, measured in Hertz.) - source: - openapi: evi-asyncapi.json - inline: true - ConnectSessionSettingsContext: - docs: >- - Allows developers to inject additional context into the conversation, - which is appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can be - used to remind the LLM of its role in every user message, prevent it from - forgetting important details, or add new relevant information to the - conversation. - - - Set to `null` to disable context injection. - properties: - text: - type: optional - docs: >- - The context to be injected into the conversation. Helps inform the - LLM's response by providing relevant information about the ongoing - conversation. - - - This text will be appended to the end of - [user_messages](/reference/speech-to-speech-evi/chat#receive.UserMessage.message.content) - based on the chosen persistence level. For example, if you want to - remind EVI of its role as a helpful weather assistant, the context you - insert will be appended to the end of user messages as `{Context: You - are a helpful weather assistant}`. - type: - type: optional - docs: >- - The persistence level of the injected context. Specifies how long the - injected context will remain active in the session. - - - - **Temporary**: Context that is only applied to the following - assistant response. - - - - **Persistent**: Context that is applied to all subsequent assistant - responses for the remainder of the Chat. 
- source: - openapi: evi-asyncapi.json - inline: true - ConnectSessionSettingsVariablesValue: - discriminated: false - union: - - string - - double - - boolean - source: - openapi: evi-asyncapi.json - inline: true - ConnectSessionSettings: - properties: - audio: - type: optional - docs: >- - Configuration details for the audio input used during the session. - Ensures the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded - in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For - detailed instructions on how to configure session settings for PCM - Linear 16 audio, please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - context: - type: optional - docs: >- - Allows developers to inject additional context into the conversation, - which is appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can - be used to remind the LLM of its role in every user message, prevent - it from forgetting important details, or add new relevant information - to the conversation. - - - Set to `null` to disable context injection. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - event_limit: - type: optional - docs: >- - The maximum number of chat events to return from chat history. By - default, the system returns up to 300 events (100 events per page × 3 - pages). Set this parameter to a smaller value to limit the number of - events returned. - language_model_api_key: - type: optional - docs: >- - Third party API key for the supplemental language model. - - - When provided, EVI will use this key instead of Hume's API key for the - supplemental LLM. 
This allows you to bypass rate limits and utilize - your own API key as needed. - system_prompt: - type: optional - docs: >- - Instructions used to shape EVI's behavior, responses, and style for - the session. - - - When included in a Session Settings message, the provided Prompt - overrides the existing one specified in the EVI configuration. If no - Prompt was defined in the configuration, this Prompt will be the one - used for the session. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). - voice_id: - type: optional - docs: >- - The name or ID of the voice from the `Voice Library` to be used as the - speaker for this EVI session. This will override the speaker set in - the selected configuration. - variables: - type: optional> - docs: >- - This field allows you to assign values to dynamic variables referenced - in your system prompt. - - - Each key represents the variable name, and the corresponding value is - the specific content you wish to assign to that variable within the - session. While the values for variables can be strings, numbers, or - booleans, the value will ultimately be converted to a string when - injected into your system prompt. - - - When used in query parameters, specify each variable using bracket - notation: `session_settings[variables][key]=value`. For example: - `session_settings[variables][name]=John&session_settings[variables][age]=30`. - - - Using this field, you can personalize responses based on - session-specific details. 
For more guidance, see our [guide on using - dynamic - variables](/docs/speech-to-speech-evi/features/dynamic-variables). - source: - openapi: evi-asyncapi.json - ControlPlanePublishEvent: - discriminated: false - union: - - type: SessionSettings - docs: >- - **Settings for this chat session.** Session settings are temporary and - apply only to the current Chat session. - - - These settings can be adjusted dynamically based on the requirements - of each session to ensure optimal performance and user experience. See - our [Session Settings - Guide](/docs/speech-to-speech-evi/configuration/session-settings) for - a complete list of configurable settings. - - type: UserInput - docs: >- - **User text to insert into the conversation.** Text sent through a - User Input message is treated as the user's speech to EVI. EVI - processes this input and provides a corresponding response. - - - Expression measurement results are not available for User Input - messages, as the prosody model relies on audio input and cannot - process text alone. - - type: AssistantInput - docs: >- - **Assistant text to synthesize into spoken audio and insert into the - conversation.** EVI uses this text to generate spoken audio using our - proprietary expressive text-to-speech model. - - - Our model adds appropriate emotional inflections and tones to the text - based on the user's expressions and the context of the conversation. - The synthesized audio is streamed back to the user as an Assistant - Message. - - type: ToolResponseMessage - docs: >- - **Return value of the tool call.** Contains the output generated by - the tool to pass back to EVI. Upon receiving a Tool Call message and - successfully invoking the function, this message is sent to convey the - result of the function call back to EVI. - - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage`. 
See our [Tool Use - Guide](/docs/speech-to-speech-evi/features/tool-use) for further - details. - - type: ToolErrorMessage - docs: >- - **Error message from the tool call**, not exposed to the LLM or user. - Upon receiving a Tool Call message and failing to invoke the function, - this message is sent to notify EVI of the tool's failure. - - - For built-in tools implemented on the server, you will receive this - message type rather than a `ToolCallMessage` if the tool fails. See - our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for - further details. - - type: PauseAssistantMessage - docs: >- - **Pause responses from EVI.** Chat history is still saved and sent - after resuming. Once this message is sent, EVI will not respond until - a Resume Assistant message is sent. - - - When paused, EVI won't respond, but transcriptions of your audio - inputs will still be recorded. See our [Pause Response - Guide](/docs/speech-to-speech-evi/features/pause-responses) for - further details. - - type: ResumeAssistantMessage - docs: >- - **Resume responses from EVI.** Chat history sent while paused will now - be sent. - - - Upon resuming, if any audio input was sent during the pause, EVI will - retain context from all messages sent but only respond to the last - user message. See our [Pause Response - Guide](/docs/speech-to-speech-evi/features/pause-responses) for - further details. 
- source: - openapi: evi-openapi.json - ErrorResponse: - properties: - code: optional - error: optional - message: optional - source: - openapi: evi-openapi.json - HTTPValidationError: - properties: - detail: - type: optional> - source: - openapi: evi-openapi.json - LanguageModelType: - enum: - - value: claude-3-7-sonnet-latest - name: Claude37SonnetLatest - - value: claude-3-5-sonnet-latest - name: Claude35SonnetLatest - - value: claude-3-5-haiku-latest - name: Claude35HaikuLatest - - value: claude-3-5-sonnet-20240620 - name: Claude35Sonnet20240620 - - value: claude-3-opus-20240229 - name: Claude3Opus20240229 - - value: claude-3-sonnet-20240229 - name: Claude3Sonnet20240229 - - value: claude-3-haiku-20240307 - name: Claude3Haiku20240307 - - value: claude-sonnet-4-20250514 - name: ClaudeSonnet420250514 - - value: claude-sonnet-4-5-20250929 - name: ClaudeSonnet4520250929 - - value: claude-haiku-4-5-20251001 - name: ClaudeHaiku4520251001 - - value: us.anthropic.claude-3-5-haiku-20241022-v1:0 - name: UsAnthropicClaude35Haiku20241022V10 - - value: us.anthropic.claude-3-5-sonnet-20240620-v1:0 - name: UsAnthropicClaude35Sonnet20240620V10 - - value: us.anthropic.claude-3-haiku-20240307-v1:0 - name: UsAnthropicClaude3Haiku20240307V10 - - value: gpt-oss-120b - name: GptOss120B - - value: qwen-3-235b-a22b - name: Qwen3235BA22B - - value: qwen-3-235b-a22b-instruct-2507 - name: Qwen3235BA22BInstruct2507 - - value: qwen-3-235b-a22b-thinking-2507 - name: Qwen3235BA22BThinking2507 - - value: gemini-1.5-pro - name: Gemini15Pro - - value: gemini-1.5-flash - name: Gemini15Flash - - value: gemini-1.5-pro-002 - name: Gemini15Pro002 - - value: gemini-1.5-flash-002 - name: Gemini15Flash002 - - value: gemini-2.0-flash - name: Gemini20Flash - - value: gemini-2.5-flash - name: Gemini25Flash - - value: gemini-2.5-flash-preview-04-17 - name: Gemini25FlashPreview0417 - - value: gpt-4-turbo - name: Gpt4Turbo - - value: gpt-4-turbo-preview - name: Gpt4TurboPreview - - value: gpt-3.5-turbo-0125 
- name: Gpt35Turbo0125 - - value: gpt-3.5-turbo - name: Gpt35Turbo - - value: gpt-4o - name: Gpt4O - - value: gpt-4o-mini - name: Gpt4OMini - - value: gpt-4.1 - name: Gpt41 - - value: gpt-5 - name: Gpt5 - - value: gpt-5-mini - name: Gpt5Mini - - value: gpt-5-nano - name: Gpt5Nano - - value: gpt-4o-priority - name: Gpt4OPriority - - value: gpt-4o-mini-priority - name: Gpt4OMiniPriority - - value: gpt-4.1-priority - name: Gpt41Priority - - value: gpt-5-priority - name: Gpt5Priority - - value: gpt-5-mini-priority - name: Gpt5MiniPriority - - value: gpt-5-nano-priority - name: Gpt5NanoPriority - - value: gemma-7b-it - name: Gemma7BIt - - value: llama3-8b-8192 - name: Llama38B8192 - - value: llama3-70b-8192 - name: Llama370B8192 - - value: llama-3.1-70b-versatile - name: Llama3170BVersatile - - value: llama-3.3-70b-versatile - name: Llama3370BVersatile - - value: llama-3.1-8b-instant - name: Llama318BInstant - - value: moonshotai/kimi-k2-instruct - name: MoonshotaiKimiK2Instruct - - value: accounts/fireworks/models/mixtral-8x7b-instruct - name: AccountsFireworksModelsMixtral8X7BInstruct - - value: accounts/fireworks/models/llama-v3p1-405b-instruct - name: AccountsFireworksModelsLlamaV3P1405BInstruct - - value: accounts/fireworks/models/llama-v3p1-70b-instruct - name: AccountsFireworksModelsLlamaV3P170BInstruct - - value: accounts/fireworks/models/llama-v3p1-8b-instruct - name: AccountsFireworksModelsLlamaV3P18BInstruct - - sonar - - value: sonar-pro - name: SonarPro - - sambanova - - value: DeepSeek-R1-Distill-Llama-70B - name: DeepSeekR1DistillLlama70B - - value: Llama-4-Maverick-17B-128E-Instruct - name: Llama4Maverick17B128EInstruct - - value: Qwen3-32B - name: Qwen332B - - value: grok-4-fast-non-reasoning-latest - name: Grok4FastNonReasoningLatest - - ellm - - value: custom-language-model - name: CustomLanguageModel - - value: hume-evi-3-web-search - name: HumeEvi3WebSearch - source: - openapi: evi-openapi.json - ModelProviderEnum: - enum: - - GROQ - - OPEN_AI - - 
FIREWORKS - - ANTHROPIC - - CUSTOM_LANGUAGE_MODEL - - GOOGLE - - HUME_AI - - AMAZON_BEDROCK - - PERPLEXITY - - SAMBANOVA - - CEREBRAS - source: - openapi: evi-openapi.json - ReturnChatAudioReconstructionStatus: - enum: - - QUEUED - - IN_PROGRESS - - COMPLETE - - ERROR - - CANCELLED - docs: >- - Indicates the current state of the audio reconstruction job. There are - five possible statuses: - - - - `QUEUED`: The reconstruction job is waiting to be processed. - - - - `IN_PROGRESS`: The reconstruction is currently being processed. - - - - `COMPLETE`: The audio reconstruction is finished and ready for download. - - - - `ERROR`: An error occurred during the reconstruction process. - - - - `CANCELED`: The reconstruction job has been canceled. - inline: true - source: - openapi: evi-openapi.json - ReturnChatAudioReconstruction: - docs: >- - List of chat audio reconstructions returned for the specified page number - and page size. - properties: - filename: - type: optional - docs: Name of the chat audio reconstruction file. - id: - type: string - docs: Identifier for the chat. Formatted as a UUID. - modified_at: - type: optional - docs: >- - The timestamp of the most recent status change for this audio - reconstruction, formatted milliseconds since the Unix epoch. - signed_audio_url: - type: optional - docs: Signed URL used to download the chat audio reconstruction file. - signed_url_expiration_timestamp_millis: - type: optional - docs: >- - The timestamp when the signed URL will expire, formatted as a Unix - epoch milliseconds. - status: - type: ReturnChatAudioReconstructionStatus - docs: >- - Indicates the current state of the audio reconstruction job. There are - five possible statuses: - - - - `QUEUED`: The reconstruction job is waiting to be processed. - - - - `IN_PROGRESS`: The reconstruction is currently being processed. - - - - `COMPLETE`: The audio reconstruction is finished and ready for - download. 
- - - - `ERROR`: An error occurred during the reconstruction process. - - - - `CANCELED`: The reconstruction job has been canceled. - user_id: - type: string - docs: Identifier for the user that owns this chat. Formatted as a UUID. - source: - openapi: evi-openapi.json - ValidationErrorLocItem: - discriminated: false - union: - - string - - integer - source: - openapi: evi-openapi.json - inline: true - ValidationError: - properties: - loc: - type: list - msg: string - type: string - source: - openapi: evi-openapi.json - WebhookEventBase: - docs: Represents the fields common to all webhook events. - properties: - chat_group_id: - type: string - docs: Unique ID of the **Chat Group** associated with the **Chat** session. - chat_id: - type: string - docs: Unique ID of the **Chat** session. - config_id: - type: optional - docs: Unique ID of the EVI **Config** used for the session. - source: - openapi: evi-openapi.json - WebhookEvent: - discriminated: false - union: - - WebhookEventChatStarted - - WebhookEventChatEnded - - WebhookEventToolCall - source: - openapi: evi-openapi.json - WebhookEventChatEnded: - properties: - caller_number: - type: optional - docs: >- - Phone number of the caller in E.164 format (e.g., `+12223333333`). - This field is included only if the Chat was created via the [Twilio - phone calling](/docs/empathic-voice-interface-evi/phone-calling) - integration. - custom_session_id: - type: optional - docs: >- - User-defined session ID. Relevant only when employing a [custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) in - the EVI Config. - duration_seconds: - type: integer - docs: Total duration of the session in seconds. - end_reason: - type: WebhookEventChatStatus - docs: Reason for the session's termination. - end_time: - type: integer - docs: Unix timestamp (in milliseconds) indicating when the session ended. - event_name: - type: optional> - docs: Always `chat_ended`. 
- extends: - - WebhookEventBase - source: - openapi: evi-openapi.json - WebhookEventChatStartType: - enum: - - new_chat_group - - resumed_chat_group - source: - openapi: evi-openapi.json - WebhookEventChatStarted: - properties: - caller_number: - type: optional - docs: >- - Phone number of the caller in E.164 format (e.g., `+12223333333`). - This field is included only if the Chat was created via the [Twilio - phone calling](/docs/empathic-voice-interface-evi/phone-calling) - integration. - chat_start_type: - type: WebhookEventChatStartType - docs: >- - Indicates whether the chat is the first in a new Chat Group - (`new_chat_group`) or the continuation of an existing chat group - (`resumed_chat_group`). - custom_session_id: - type: optional - docs: >- - User-defined session ID. Relevant only when employing a [custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) in - the EVI Config. - event_name: - type: optional> - docs: Always `chat_started`. - start_time: - type: integer - docs: Unix timestamp (in milliseconds) indicating when the session started. - extends: - - WebhookEventBase - source: - openapi: evi-openapi.json - WebhookEventChatStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - INACTIVITY_TIMEOUT - - MAX_DURATION_TIMEOUT - - SILENCE_TIMEOUT - - ERROR - source: - openapi: evi-openapi.json - WebhookEventToolCall: - properties: - caller_number: - type: optional - docs: >- - Phone number of the caller in E.164 format (e.g., `+12223333333`). - This field is included only if the Chat was created via the [Twilio - phone calling](/docs/empathic-voice-interface-evi/phone-calling) - integration. - custom_session_id: - type: optional - docs: >- - User-defined session ID. Relevant only when employing a [custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) in - the EVI Config. - event_name: - type: optional> - docs: Always `tool_call`. 
- timestamp: - type: integer - docs: >- - Unix timestamp (in milliseconds) indicating when the tool call was - triggered. - tool_call_message: - type: ToolCallMessage - docs: The tool call. - extends: - - WebhookEventBase - source: - openapi: evi-openapi.json - PostedBuiltinToolName: - enum: - - web_search - - hang_up - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date information - when applicable. - - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/speech-to-speech-evi/features/tool-use#using-built-in-tools). - inline: true - source: - openapi: evi-openapi.json - PostedBuiltinTool: - docs: A configuration of a built-in tool to be posted to the server - properties: - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - name: - type: PostedBuiltinToolName - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date - information when applicable. - - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/speech-to-speech-evi/features/tool-use#using-built-in-tools). - source: - openapi: evi-openapi.json - PostedConfigPromptSpec: - docs: >- - Identifies which prompt to use in a a config OR how to create a new prompt - to use in the config - properties: - id: - type: optional - docs: Identifier for a Prompt. Formatted as a UUID. 
- text: - type: optional - docs: Text used to create a new prompt for a particular config. - version: - type: optional - docs: >- - Version number for a Prompt. Version numbers should be integers. The - combination of configId and version number is unique. - source: - openapi: evi-openapi.json - PostedEllmModel: - docs: A eLLM model configuration to be posted to the server - properties: - allow_short_responses: - type: optional - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: evi-openapi.json - PostedEventMessageSpec: - docs: Settings for a specific event_message to be posted to the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. - source: - openapi: evi-openapi.json - PostedEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. - - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. 
- - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. - - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. - source: - openapi: evi-openapi.json - PostedLanguageModel: - docs: A LanguageModel to be posted to the server - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM's output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: evi-openapi.json - PostedNudgeSpec: - docs: A nudge specification posted to the server - properties: - enabled: - type: optional - docs: >- - If true, EVI will 'nudge' the user to speak after a determined - interval of silence. - interval_secs: - type: optional - docs: The interval of inactivity (in seconds) before a nudge is triggered. 
- source: - openapi: evi-openapi.json - PostedTimeoutSpec: - docs: Settings for a specific timeout to be posted to the server - properties: - duration_secs: - type: optional - docs: Duration in seconds for the timeout. - enabled: - type: boolean - docs: Boolean indicating if this event message is enabled. - source: - openapi: evi-openapi.json - PostedTimeoutSpecsInactivity: - docs: >- - Specifies the duration of user inactivity (in seconds) after which the EVI - WebSocket connection will be automatically disconnected. Default is 600 - seconds (10 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - properties: - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration of - user inactivity being reached. However, the conversation will - eventually disconnect after 1,800 seconds (30 minutes), which is the - maximum WebSocket duration limit for EVI. - source: - openapi: evi-openapi.json - inline: true - PostedTimeoutSpecsMaxDuration: - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI WebSocket - connection before it is automatically disconnected. Default is 1,800 - seconds (30 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - properties: - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified maximum - duration being reached. However, the conversation will eventually - disconnect after 1,800 seconds (30 minutes), which is the maximum - WebSocket duration limit for EVI. 
- source: - openapi: evi-openapi.json - inline: true - PostedTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. - - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. - properties: - inactivity: - type: optional - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - max_duration: - type: optional - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - source: - openapi: evi-openapi.json - PostedUserDefinedToolSpec: - docs: A specific tool identifier to be posted to the server - properties: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. 
- source: - openapi: evi-openapi.json - PostedWebhookEventType: - enum: - - chat_started - - chat_ended - - tool_call - docs: Events this URL is subscribed to - inline: true - source: - openapi: evi-openapi.json - PostedWebhookSpec: - docs: URL and settings for a specific webhook to be posted to the server - properties: - events: - docs: >- - The list of events the specified URL is subscribed to. - - - See our [webhooks - guide](/docs/speech-to-speech-evi/configuration/build-a-configuration#supported-events) - for more information on supported events. - type: list - url: - type: string - docs: >- - The URL where event payloads will be sent. This must be a valid https - URL to ensure secure communication. The server at this URL must accept - POST requests with a JSON payload. - source: - openapi: evi-openapi.json - ReturnBuiltinToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. - inline: true - source: - openapi: evi-openapi.json - ReturnBuiltinTool: - docs: A specific builtin tool version returned from the server - properties: - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - name: - type: string - docs: Name applied to all versions of a particular Tool. - tool_type: - type: ReturnBuiltinToolToolType - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - source: - openapi: evi-openapi.json - ReturnChatStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR - docs: >- - Indicates the current state of the chat. 
There are six possible statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - inline: true - source: - openapi: evi-openapi.json - ReturnChat: - docs: A description of chat and its status - properties: - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. - config: optional - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - event_count: - type: optional - docs: The total number of events currently in this chat. - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - status: - type: ReturnChatStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. 
- source: - openapi: evi-openapi.json - ReturnChatEventRole: - enum: - - USER - - AGENT - - SYSTEM - - TOOL - docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: - - - `USER`: The user, capable of sending user messages and interruptions. - - - `AGENT`: The assistant, capable of sending agent messages. - - - `SYSTEM`: The backend server, capable of transmitting errors. - - - `TOOL`: The function calling mechanism. - inline: true - source: - openapi: evi-openapi.json - ReturnChatEventType: - enum: - - FUNCTION_CALL - - FUNCTION_CALL_RESPONSE - - CHAT_END_MESSAGE - - AGENT_MESSAGE - - SYSTEM_PROMPT - - USER_RECORDING_START_MESSAGE - - RESUME_ONSET - - USER_INTERRUPTION - - CHAT_START_MESSAGE - - PAUSE_ONSET - - USER_MESSAGE - docs: >- - Type of Chat Event. There are eleven Chat Event types: - - - `SYSTEM_PROMPT`: The system prompt used to initialize the session. - - - `CHAT_START_MESSAGE`: Marks the beginning of the chat session. - - - `USER_RECORDING_START_MESSAGE`: Marks when the client began streaming - audio and the start of audio processing. - - - `USER_MESSAGE`: A message sent by the user. - - - `USER_INTERRUPTION`: A user-initiated interruption while the assistant - is speaking. - - - `AGENT_MESSAGE`: A response generated by the assistant. - - - `FUNCTION_CALL`: A record of a tool invocation by the assistant. - - - `FUNCTION_CALL_RESPONSE`: The result of a previously invoked function or - tool. - - - `PAUSE_ONSET`: Marks when the client sent a `pause_assistant_message` to - pause the assistant. - - - `RESUME_ONSET`: Marks when the client sent a `resume_assistant_message` - to resume the assistant. - - - `CHAT_END_MESSAGE`: Indicates the end of the chat session. - inline: true - source: - openapi: evi-openapi.json - ReturnChatEvent: - docs: A description of a single event in a chat returned from the server - properties: - chat_id: - type: string - docs: Identifier for the Chat this event occurred in. 
Formatted as a UUID. - emotion_features: - type: optional - docs: >- - Stringified JSON containing the prosody model inference results. - - - EVI uses the prosody model to measure 48 expressions related to speech - and vocal characteristics. These results contain a detailed emotional - and tonal analysis of the audio. Scores typically range from 0 to 1, - with higher values indicating a stronger confidence level in the - measured attribute. - id: - type: string - docs: Identifier for a Chat Event. Formatted as a UUID. - message_text: - type: optional - docs: >- - The text of the Chat Event. This field contains the message content - for each event type listed in the `type` field. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat event. - related_event_id: - type: optional - docs: >- - Identifier for a related chat event. Currently only seen on - ASSISTANT_PROSODY events, to point back to the ASSISTANT_MESSAGE that - generated these prosody scores - role: - type: ReturnChatEventRole - docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: - - - `USER`: The user, capable of sending user messages and - interruptions. - - - `AGENT`: The assistant, capable of sending agent messages. - - - `SYSTEM`: The backend server, capable of transmitting errors. - - - `TOOL`: The function calling mechanism. - timestamp: - type: long - docs: >- - Time at which the Chat Event occurred. Measured in seconds since the - Unix epoch. - type: - type: ReturnChatEventType - docs: >- - Type of Chat Event. There are eleven Chat Event types: - - - `SYSTEM_PROMPT`: The system prompt used to initialize the session. - - - `CHAT_START_MESSAGE`: Marks the beginning of the chat session. - - - `USER_RECORDING_START_MESSAGE`: Marks when the client began - streaming audio and the start of audio processing. - - - `USER_MESSAGE`: A message sent by the user. 
- - - `USER_INTERRUPTION`: A user-initiated interruption while the - assistant is speaking. - - - `AGENT_MESSAGE`: A response generated by the assistant. - - - `FUNCTION_CALL`: A record of a tool invocation by the assistant. - - - `FUNCTION_CALL_RESPONSE`: The result of a previously invoked - function or tool. - - - `PAUSE_ONSET`: Marks when the client sent a - `pause_assistant_message` to pause the assistant. - - - `RESUME_ONSET`: Marks when the client sent a - `resume_assistant_message` to resume the assistant. - - - `CHAT_END_MESSAGE`: Indicates the end of the chat session. - source: - openapi: evi-openapi.json - ReturnChatGroup: - docs: A description of chat_group and its status - properties: - active: - type: optional - docs: >- - Denotes whether there is an active Chat associated with this Chat - Group. - first_start_timestamp: - type: long - docs: >- - Time at which the first Chat in this Chat Group was created. Measured - in seconds since the Unix epoch. - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - most_recent_chat_id: - type: optional - docs: >- - The `chat_id` of the most recent Chat in this Chat Group. Formatted as - a UUID. - most_recent_config: optional - most_recent_start_timestamp: - type: long - docs: >- - Time at which the most recent Chat in this Chat Group was created. - Measured in seconds since the Unix epoch. - num_chats: - type: integer - docs: The total number of Chats in this Chat Group. - source: - openapi: evi-openapi.json - ReturnChatGroupPagedAudioReconstructionsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). 
This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatGroupPagedAudioReconstructions: - docs: A paginated list of chat reconstructions for a particular chatgroup - properties: - audio_reconstructions_page: - docs: >- - List of chat audio reconstructions returned for the specified page - number and page size. - type: list - id: - type: string - docs: Identifier for the chat group. Formatted as a UUID. - num_chats: - type: integer - docs: Total number of chats in this chatgroup - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatGroupPagedAudioReconstructionsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - user_id: - type: string - docs: Identifier for the user that owns this chat. Formatted as a UUID. - source: - openapi: evi-openapi.json - ReturnChatGroupPagedChatsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. 
- - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatGroupPagedChats: - docs: >- - A description of chat_group and its status with a paginated list of each - chat in the chat_group - properties: - active: - type: optional - docs: >- - Denotes whether there is an active Chat associated with this Chat - Group. - chats_page: - docs: List of Chats for the specified `page_number` and `page_size`. - type: list - first_start_timestamp: - type: long - docs: >- - Time at which the first Chat in this Chat Group was created. Measured - in seconds since the Unix epoch. - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - most_recent_start_timestamp: - type: long - docs: >- - Time at which the most recent Chat in this Chat Group was created. - Measured in seconds since the Unix epoch. - num_chats: - type: integer - docs: The total number of Chats associated with this Chat Group. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatGroupPagedChatsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. 
- - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnChatGroupPagedEventsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatGroupPagedEvents: - docs: >- - A paginated list of chat events that occurred across chats in this - chat_group from the server - properties: - events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatGroupPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. 
- - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnChatPagedEventsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnChatPagedEventsStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR - docs: >- - Indicates the current state of the chat. There are six possible statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - inline: true - source: - openapi: evi-openapi.json - ReturnChatPagedEvents: - docs: >- - A description of chat status with a paginated list of chat events returned - from the server - properties: - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. 
- config: optional - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnChatPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - status: - type: ReturnChatPagedEventsStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. 
- total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnConfig: - docs: A specific config version returned from the server - properties: - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this config - created_on: - type: optional - docs: The timestamp when the first version of this config was created. - ellm_model: optional - event_messages: optional - evi_version: - type: optional - docs: The version of the EVI used with this config. - id: - type: optional - docs: Identifier for a Config. Formatted as a UUID. - language_model: optional - modified_on: - type: optional - docs: The timestamp when this version of the config was created. - name: - type: optional - docs: Name applied to all versions of a particular Config. - nudges: optional - prompt: optional - timeouts: optional - tools: - type: optional>> - docs: List of user-defined tools associated with this config. - version: - type: optional - docs: >- - Version number for a Config. Version numbers should be integers. The - combination of configId and version number is unique. - version_description: - type: optional - docs: Description that is appended to a specific version of a Config. - voice: optional - webhooks: - type: optional>> - docs: Map of webhooks associated with this config. - source: - openapi: evi-openapi.json - ReturnConfigSpec: - docs: The Config associated with this Chat. - properties: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. 
- source: - openapi: evi-openapi.json - ReturnEllmModel: - docs: A specific eLLM Model configuration - properties: - allow_short_responses: - type: boolean - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: evi-openapi.json - ReturnEventMessageSpec: - docs: A specific event message configuration to be returned from the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. - source: - openapi: evi-openapi.json - ReturnEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. - - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. - - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. 
- - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. - source: - openapi: evi-openapi.json - ReturnLanguageModel: - docs: A specific LanguageModel - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM's output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: evi-openapi.json - ReturnNudgeSpec: - docs: A specific nudge configuration returned from the server - properties: - enabled: - type: boolean - docs: EVI will nudge user after inactivity - interval_secs: - type: optional - docs: Time interval in seconds after which the nudge will be sent. - source: - openapi: evi-openapi.json - ReturnPagedChatGroupsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. 
- inline: true - source: - openapi: evi-openapi.json - ReturnPagedChatGroups: - docs: A paginated list of chat_groups returned from the server - properties: - chat_groups_page: - docs: >- - List of Chat Groups and their metadata returned for the specified - `page_number` and `page_size`. - type: list - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnPagedChatGroupsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedChatsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - inline: true - source: - openapi: evi-openapi.json - ReturnPagedChats: - docs: A paginated list of chats returned from the server - properties: - chats_page: - docs: >- - List of Chats and their metadata returned for the specified - `page_number` and `page_size`. 
- type: list - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - pagination_direction: - type: ReturnPagedChatsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedConfigs: - docs: A paginated list of config versions returned from the server - properties: - configs_page: - type: optional> - docs: >- - List of configs returned for the specified `page_number` and - `page_size`. - page_number: - type: optional - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: optional - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedPrompts: - docs: A paginated list of prompt versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. 
Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - prompts_page: - docs: >- - List of prompts returned for the specified `page_number` and - `page_size`. - type: list> - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPagedUserDefinedTools: - docs: A paginated list of user defined tool versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - tools_page: - docs: >- - List of tools returned for the specified `page_number` and - `page_size`. - type: list> - total_pages: - type: integer - docs: The total number of pages in the collection. - source: - openapi: evi-openapi.json - ReturnPromptVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Prompt. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - inline: true - source: - openapi: evi-openapi.json - ReturnPrompt: - docs: A specific prompt version returned from the server - properties: - created_on: - type: long - docs: The timestamp when the first version of this prompt was created. - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - modified_on: - type: long - docs: The timestamp when this version of the prompt was created. - name: - type: string - docs: Name applied to all versions of a particular Prompt. - text: - type: string - docs: Text used for this version of the Prompt. 
- version: - type: integer - docs: >- - Version number for a Prompt. Version numbers should be integers. The - combination of configId and version number is unique. - version_description: - type: optional - docs: Description that is appended to a specific version of a Prompt. - version_type: - type: string - docs: >- - Indicates whether this prompt is using a fixed version number or - auto-updating to the latest version. Values from the VersionType enum. - source: - openapi: evi-openapi.json - ReturnTimeoutSpec: - docs: A specific timeout configuration to be returned from the server - properties: - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration - being reached. However, the conversation will eventually disconnect - after 1,800 seconds (30 minutes), which is the maximum WebSocket - duration limit for EVI. - source: - openapi: evi-openapi.json - ReturnTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. - - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. - properties: - inactivity: - type: ReturnTimeoutSpec - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - max_duration: - type: ReturnTimeoutSpec - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). 
- - - Accepts a minimum value of 30 seconds and a maximum value of 1,800 - seconds. - source: - openapi: evi-openapi.json - ReturnUserDefinedToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. - inline: true - source: - openapi: evi-openapi.json - ReturnUserDefinedToolVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - inline: true - source: - openapi: evi-openapi.json - ReturnUserDefinedTool: - docs: A specific tool version returned from the server - properties: - created_on: - type: long - docs: >- - Time at which the Tool was created. Measured in seconds since the Unix - epoch. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - modified_on: - type: long - docs: >- - Time at which the Tool was last modified. Measured in seconds since - the Unix epoch. - name: - type: string - docs: Name applied to all versions of a particular Tool. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of the - Tool. - - - These parameters define the inputs needed for the Tool's execution, - including the expected data type and description for each input field. - Structured as a stringified JSON schema, this format ensures the tool - receives data in the expected format. 
- tool_type: - type: ReturnUserDefinedToolToolType - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - version_description: - type: optional - docs: An optional description of the Tool version. - version_type: - type: ReturnUserDefinedToolVersionType - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - source: - openapi: evi-openapi.json - ReturnWebhookEventType: - enum: - - chat_started - - chat_ended - - tool_call - docs: Events this URL is subscribed to - inline: true - source: - openapi: evi-openapi.json - ReturnWebhookSpec: - docs: Collection of webhook URL endpoints to be returned from the server - properties: - events: - docs: >- - The list of events the specified URL is subscribed to. - - - See our [webhooks - guide](/docs/speech-to-speech-evi/configuration/build-a-configuration#supported-events) - for more information on supported events. - type: list - url: - type: string - docs: >- - The URL where event payloads will be sent. This must be a valid https - URL to ensure secure communication. The server at this URL must accept - POST requests with a JSON payload. - source: - openapi: evi-openapi.json - VoiceId: - properties: - id: - type: string - docs: ID of the voice in the `Voice Library`. - provider: - type: optional - docs: Model provider associated with this Voice ID. 
- source: - openapi: evi-openapi.json - VoiceName: - properties: - name: - type: string - docs: Name of the voice in the `Voice Library`. - provider: - type: optional - docs: Model provider associated with this Voice Name. - source: - openapi: evi-openapi.json - VoiceRef: - discriminated: false - union: - - type: VoiceId - - type: VoiceName - source: - openapi: evi-openapi.json - ReturnVoice: - docs: An Octave voice available for text-to-speech - properties: - compatible_octave_models: optional> - id: optional - name: optional - provider: optional - source: - openapi: evi-openapi.json - VoiceProvider: - enum: - - HUME_AI - - CUSTOM_VOICE - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/chat.yml b/.mock/definition/empathic-voice/chat.yml deleted file mode 100644 index c69e2ab8..00000000 --- a/.mock/definition/empathic-voice/chat.yml +++ /dev/null @@ -1,149 +0,0 @@ -imports: - root: __package__.yml -channel: - path: /chat - url: evi - auth: false - docs: Chat with Empathic Voice Interface (EVI) - query-parameters: - access_token: - type: optional - default: '' - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - allow_connection: - type: optional - default: false - docs: Allows external connections to this chat via the /connect endpoint. - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. - - - Include this ID in your connection request to equip EVI with the Prompt, - Language Model, Voice, and Tools associated with the specified - configuration. 
If omitted, EVI will apply [default configuration - settings](/docs/speech-to-speech-evi/configuration/build-a-configuration#default-configuration). - - - For help obtaining this ID, see our [Configuration - Guide](/docs/speech-to-speech-evi/configuration). - config_version: - type: optional - docs: >- - The version number of the EVI configuration specified by the - `config_id`. - - - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. - - - Include this parameter to apply a specific version of an EVI - configuration. If omitted, the latest version will be applied. - event_limit: - type: optional - docs: >- - The maximum number of chat events to return from chat history. By - default, the system returns up to 300 events (100 events per page × 3 - pages). Set this parameter to a smaller value to limit the number of - events returned. - resumed_chat_group_id: - type: optional - docs: >- - The unique identifier for a Chat Group. Use this field to preserve - context from a previous Chat session. - - - A Chat represents a single session from opening to closing a WebSocket - connection. In contrast, a Chat Group is a series of resumed Chats that - collectively represent a single conversation spanning multiple sessions. - Each Chat includes a Chat Group ID, which is used to preserve the - context of previous Chat sessions when starting a new one. - - - Including the Chat Group ID in the `resumed_chat_group_id` query - parameter is useful for seamlessly resuming a Chat after unexpected - network disconnections and for picking up conversations exactly where - you left off at a later time. This ensures preserved context across - multiple sessions. 
- - - There are three ways to obtain the Chat Group ID: - - - - [Chat - Metadata](/reference/speech-to-speech-evi/chat#receive.ChatMetadata): - Upon establishing a WebSocket connection with EVI, the user receives a - Chat Metadata message. This message contains a `chat_group_id`, which - can be used to resume conversations within this chat group in future - sessions. - - - - [List Chats - endpoint](/reference/speech-to-speech-evi/chats/list-chats): Use the GET - `/v0/evi/chats` endpoint to obtain the Chat Group ID of individual Chat - sessions. This endpoint lists all available Chat sessions and their - associated Chat Group ID. - - - - [List Chat Groups - endpoint](/reference/speech-to-speech-evi/chat-groups/list-chat-groups): - Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs - of all Chat Groups associated with an API key. This endpoint returns a - list of all available chat groups. - verbose_transcription: - type: optional - default: false - docs: >- - A flag to enable verbose transcription. Set this query parameter to - `true` to have unfinalized user transcripts be sent to the client as - interim UserMessage messages. The - [interim](/reference/speech-to-speech-evi/chat#receive.UserMessage.interim) - field on a - [UserMessage](/reference/speech-to-speech-evi/chat#receive.UserMessage) - denotes whether the message is "interim" or "final." - api_key: - type: optional - default: '' - docs: >- - API key used for authenticating the client. If not provided, an - `access_token` must be provided to authenticate. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). 
- session_settings: root.ConnectSessionSettings - messages: - publish: - origin: client - body: PublishEvent - subscribe: - origin: server - body: root.SubscribeEvent -types: - PublishEvent: - discriminated: false - union: - - type: root.AudioInput - - type: root.SessionSettings - - type: root.UserInput - - type: root.AssistantInput - - type: root.ToolResponseMessage - - type: root.ToolErrorMessage - - type: root.PauseAssistantMessage - - type: root.ResumeAssistantMessage - source: - openapi: evi-asyncapi.json diff --git a/.mock/definition/empathic-voice/chatGroups.yml b/.mock/definition/empathic-voice/chatGroups.yml deleted file mode 100644 index 0ed0d310..00000000 --- a/.mock/definition/empathic-voice/chatGroups.yml +++ /dev/null @@ -1,626 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-chat-groups: - path: /v0/evi/chat_groups - method: GET - docs: Fetches a paginated list of **Chat Groups**. - pagination: - offset: $request.page_number - results: $response.chat_groups_page - source: - openapi: evi-openapi.json - display-name: List chat_groups - request: - name: ChatGroupsListChatGroupsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. 
- ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. - - - Filter Chat Groups to only include Chats that used this - `config_id` in their most recent Chat. - validation: - format: uuid - response: - docs: Success - type: root.ReturnPagedChatGroups - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chat_groups_page: - - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - first_start_timestamp: 1721844196397 - most_recent_start_timestamp: 1721861821717 - active: false - most_recent_chat_id: dfdbdd4d-0ddf-418b-8fc4-80a266579d36 - num_chats: 5 - get-chat-group: - path: /v0/evi/chat_groups/{id} - method: GET - docs: >- - Fetches a **ChatGroup** by ID, including a paginated list of **Chats** - associated with the **ChatGroup**. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. - display-name: Get chat_group - request: - name: ChatGroupsGetChatGroupRequest - query-parameters: - status: - type: optional - docs: Chat status to apply to the chat. String from the ChatStatus enum. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. 
- page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatGroupPagedChats - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - response: - body: - id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - first_start_timestamp: 1712334213647 - most_recent_start_timestamp: 1712334213647 - num_chats: 1 - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chats_page: - - id: 6375d4f8-cd3e-4d6b-b13b-ace66b7c8aaa - chat_group_id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - status: USER_ENDED - start_timestamp: 1712334213647 - end_timestamp: 1712334332571 - event_count: 0 - metadata: null - config: null - active: false - get-audio: - path: /v0/evi/chat_groups/{id}/audio - method: GET - docs: >- - Fetches a paginated list of audio for each **Chat** within the specified - **Chat Group**. For more details, see our guide on audio reconstruction - [here](/docs/speech-to-speech-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. 
- display-name: Get chat group audio - request: - name: ChatGroupsGetAudioRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatGroupPagedAudioReconstructions - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - query-parameters: - page_number: 0 - page_size: 10 - ascending_order: true - response: - body: - id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - user_id: e6235940-cfda-3988-9147-ff531627cf42 - num_chats: 1 - page_number: 0 - page_size: 10 - total_pages: 1 - pagination_direction: ASC - audio_reconstructions_page: - - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - user_id: e6235940-cfda-3988-9147-ff531627cf42 - status: COMPLETE - filename: >- - e6235940-cfda-3988-9147-ff531627cf42/470a49f6-1dec-4afe-8b61-035d3b2d63b0/reconstructed_audio.mp4 - modified_at: 1729875432555 - signed_audio_url: https://storage.googleapis.com/...etc. 
- signed_url_expiration_timestamp_millis: 1730232816964 - list-chat-group-events: - path: /v0/evi/chat_groups/{id}/events - method: GET - docs: >- - Fetches a paginated list of **Chat** events associated with a **Chat - Group**. - pagination: - offset: $request.page_number - results: $response.events_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. - display-name: List chat events from a specific chat_group - request: - name: ChatGroupsListChatGroupEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. 
- response: - docs: Success - type: root.ReturnChatGroupPagedEvents - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - page_number: 0 - page_size: 3 - total_pages: 1 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: '' - metadata: '' - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. 
- emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 
0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 
0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 
2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - 
-0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 
0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, 
-0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 
0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 
0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: '' - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/chatWebhooks.yml b/.mock/definition/empathic-voice/chatWebhooks.yml deleted file mode 100644 index 6e66c302..00000000 --- a/.mock/definition/empathic-voice/chatWebhooks.yml +++ /dev/null @@ -1,57 +0,0 @@ -imports: - root: __package__.yml -webhooks: - chatEnded: - audiences: [] - method: POST - display-name: Chat Ended - headers: {} - payload: root.WebhookEventChatEnded - examples: - - payload: - chat_group_id: chat_group_id - chat_id: chat_id - config_id: null - caller_number: null - custom_session_id: null - duration_seconds: 1 - end_reason: ACTIVE - end_time: 1 - docs: Sent when an EVI chat ends. - chatStarted: - audiences: [] - method: POST - display-name: Chat Started - headers: {} - payload: root.WebhookEventChatStarted - examples: - - payload: - chat_group_id: chat_group_id - chat_id: chat_id - config_id: null - caller_number: null - chat_start_type: new_chat_group - custom_session_id: null - start_time: 1 - docs: Sent when an EVI chat is started. 
- toolCall: - audiences: [] - method: POST - display-name: Tool Call - headers: {} - payload: root.WebhookEventToolCall - examples: - - payload: - chat_group_id: chat_group_id - chat_id: chat_id - config_id: null - caller_number: null - custom_session_id: null - timestamp: 1 - tool_call_message: - name: name - parameters: parameters - response_required: true - tool_call_id: tool_call_id - tool_type: builtin - docs: Sent when EVI triggers a tool call diff --git a/.mock/definition/empathic-voice/chats.yml b/.mock/definition/empathic-voice/chats.yml deleted file mode 100644 index bdfd3786..00000000 --- a/.mock/definition/empathic-voice/chats.yml +++ /dev/null @@ -1,506 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-chats: - path: /v0/evi/chats - method: GET - docs: Fetches a paginated list of **Chats**. - pagination: - offset: $request.page_number - results: $response.chats_page - source: - openapi: evi-openapi.json - display-name: List chats - request: - name: ChatsListChatsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. 
Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - config_id: - type: optional - docs: Filter to only include chats that used this config. - validation: - format: uuid - status: - type: optional - docs: Chat status to apply to the chat. String from the ChatStatus enum. - response: - docs: Success - type: root.ReturnPagedChats - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chats_page: - - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - end_timestamp: 1716244958546 - event_count: 3 - metadata: '' - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - list-chat-events: - path: /v0/evi/chats/{id} - method: GET - docs: Fetches a paginated list of **Chat** events. - pagination: - offset: $request.page_number - results: $response.events_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - display-name: List chat events - request: - name: ChatsListChatEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. 
For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatPagedEvents - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: '' - metadata: '' - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. 
- emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 
0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 
0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 
2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - 
-0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 
0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, 
-0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 
0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 
0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: '' - page_number: 0 - page_size: 3 - total_pages: 1 - end_timestamp: 1716244958546 - metadata: '' - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - get-audio: - path: /v0/evi/chats/{id}/audio - method: GET - docs: >- - Fetches the audio of a previous **Chat**. For more details, see our - guide on audio reconstruction - [here](/docs/speech-to-speech-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a chat. Formatted as a UUID. - display-name: Get chat audio - response: - docs: Success - type: root.ReturnChatAudioReconstruction - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - response: - body: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - user_id: e6235940-cfda-3988-9147-ff531627cf42 - status: COMPLETE - filename: >- - e6235940-cfda-3988-9147-ff531627cf42/470a49f6-1dec-4afe-8b61-035d3b2d63b0/reconstructed_audio.mp4 - modified_at: 1729875432555 - signed_audio_url: https://storage.googleapis.com/...etc. - signed_url_expiration_timestamp_millis: 1730232816964 - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/configs.yml b/.mock/definition/empathic-voice/configs.yml deleted file mode 100644 index 06995d3c..00000000 --- a/.mock/definition/empathic-voice/configs.yml +++ /dev/null @@ -1,835 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-configs: - path: /v0/evi/configs - method: GET - docs: >- - Fetches a paginated list of **Configs**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). 
- pagination: - offset: $request.page_number - results: $response.configs_page - source: - openapi: evi-openapi.json - display-name: List configs - request: - name: ConfigsListConfigsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include configs with this name. 
- response: - docs: Success - type: root.ReturnPagedConfigs - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: '' - name: Weather Assistant Config - created_on: 1715267200693 - modified_on: 1715267200693 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config: - path: /v0/evi/configs - method: POST - docs: >- - Creates a **Config** which can be applied to EVI. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). 
- source: - openapi: evi-openapi.json - display-name: Create config - request: - name: PostedConfig - body: - properties: - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config. - - - Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - event_messages: optional - evi_version: - type: string - docs: >- - EVI version to use. Only versions `3` and `4-mini` are - supported. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config. - - - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - name: - type: string - docs: Name applied to all versions of a particular Config. - nudges: - type: optional - docs: >- - Configures nudges, brief audio prompts that can guide - conversations when users pause or need encouragement to continue - speaking. Nudges help create more natural, flowing interactions - by providing gentle conversational cues. - prompt: optional - timeouts: optional - tools: - type: optional>> - docs: List of user-defined tools associated with this Config. - version_description: - type: optional - docs: An optional description of the Config version. - voice: - type: optional - docs: A voice specification associated with this Config. - webhooks: - type: optional>> - docs: Webhook config specifications for each subscriber. 
- content-type: application/json - response: - docs: Created - type: root.ReturnConfig - status-code: 201 - errors: - - root.BadRequestError - examples: - - request: - name: Weather Assistant Config - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - evi_version: '3' - voice: - provider: HUME_AI - name: Ava Song - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: '' - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. 
- voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - list-config-versions: - path: /v0/evi/configs/{id} - method: GET - docs: >- - Fetches a list of a **Config's** versions. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - pagination: - offset: $request.page_number - results: $response.configs_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: List config versions - request: - name: ConfigsListConfigVersionsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. 
- restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each config. To include all versions of - each config in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedConfigs - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: '' - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config-version: - path: /v0/evi/configs/{id} - method: POST - docs: >- - Updates a **Config** by creating a new version of the **Config**. 
- - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Create config version - request: - name: PostedConfigVersion - body: - properties: - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config version. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config version. - - - Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - event_messages: optional - evi_version: - type: string - docs: The version of the EVI used with this config. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config - version. - - - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - nudges: optional - prompt: optional - timeouts: optional - tools: - type: optional>> - docs: List of user-defined tools associated with this Config version. - version_description: - type: optional - docs: An optional description of the Config version. - voice: - type: optional - docs: A voice specification associated with this Config version. - webhooks: - type: optional>> - docs: Webhook config specifications for each subscriber. 
- content-type: application/json - response: - docs: Created - type: root.ReturnConfig - status-code: 201 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - version_description: This is an updated version of the Weather Assistant Config. - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - voice: - provider: HUME_AI - name: Ava Song - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: true - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version of the Weather Assistant Config. - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1722642242998 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. 
- voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: true - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config: - path: /v0/evi/configs/{id} - method: DELETE - docs: >- - Deletes a **Config** and its versions. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Delete config - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - update-config-name: - path: /v0/evi/configs/{id} - method: PATCH - docs: >- - Updates the name of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Update config name - request: - name: PostedConfigName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Config. 
- content-type: application/json - response: - docs: Success - type: text - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - name: Updated Weather Assistant Config Name - get-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: GET - docs: >- - Fetches a specified version of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Get config version - response: - docs: Success - type: root.ReturnConfig - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: '' - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. 
Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: DELETE - docs: >- - Deletes a specified version of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Delete config version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - update-config-description: - path: /v0/evi/configs/{id}/version/{version} - method: PATCH - docs: >- - Updates the description of a **Config**. 
- - - For more details on configuration options and how to configure EVI, see - our [configuration guide](/docs/speech-to-speech-evi/configuration). - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Update config description - request: - name: PostedConfigVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Config version. - content-type: application/json - response: - docs: Success - type: root.ReturnConfig - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - request: - version_description: This is an updated version_description. - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version_description. - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: '3' - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. 
Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: Ava Song - id: 5bb7de05-c8fe-426a-8fcc-ba4fc4ce9f9c - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-7-sonnet-latest - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: '' - on_inactivity_timeout: - enabled: false - text: '' - on_max_duration_timeout: - enabled: false - text: '' - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/controlPlane.yml b/.mock/definition/empathic-voice/controlPlane.yml deleted file mode 100644 index 83d760a1..00000000 --- a/.mock/definition/empathic-voice/controlPlane.yml +++ /dev/null @@ -1,72 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - send: - path: /v0/evi/chat/{chat_id}/send - method: POST - docs: Send a message to a specific chat. - source: - openapi: evi-openapi.json - path-parameters: - chat_id: string - display-name: Send Message - request: - body: root.ControlPlanePublishEvent - content-type: application/json - errors: - - root.UnprocessableEntityError - examples: - - path-parameters: - chat_id: chat_id - request: - type: session_settings - source: - openapi: evi-openapi.json -channel: - path: /chat/{chat_id}/connect - url: evi - auth: false - display-name: Control Plane - docs: >- - Connects to an in-progress EVI chat session. The original chat must have - been started with `allow_connection=true`. The connection can be used to - send and receive the same messages as the original chat, with the exception - that `audio_input` messages are not allowed. - path-parameters: - chat_id: - type: string - docs: The ID of the chat to connect to. 
- query-parameters: - access_token: - type: optional - default: '' - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - messages: - publish: - origin: client - body: root.ControlPlanePublishEvent - subscribe: - origin: server - body: root.SubscribeEvent - examples: - - messages: - - type: publish - body: - type: session_settings - - type: subscribe - body: - type: assistant_end diff --git a/.mock/definition/empathic-voice/prompts.yml b/.mock/definition/empathic-voice/prompts.yml deleted file mode 100644 index 10898960..00000000 --- a/.mock/definition/empathic-voice/prompts.yml +++ /dev/null @@ -1,549 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-prompts: - path: /v0/evi/prompts - method: GET - docs: >- - Fetches a paginated list of **Prompts**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - pagination: - offset: $request.page_number - results: $response.prompts_page - source: - openapi: evi-openapi.json - display-name: List prompts - request: - name: PromptsListPromptsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. 
- page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: Only include the most recent version of each prompt in the list. - name: - type: optional - docs: Filter to only include prompts with name. - response: - docs: Success - type: root.ReturnPagedPrompts - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - - id: 616b2b4c-a096-4445-9c23-64058b564fc2 - version: 0 - version_type: FIXED - version_description: '' - name: Web Search Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI web search assistant designed to help - users find accurate and relevant information on the web. - Respond to user queries promptly, using the built-in web - search tool to retrieve up-to-date results. Present - information clearly and concisely, summarizing key points - where necessary. Use simple language and avoid technical - jargon. If needed, provide helpful tips for refining search - queries to obtain better results. 
- create-prompt: - path: /v0/evi/prompts - method: POST - docs: >- - Creates a **Prompt** that can be added to an [EVI - configuration](/reference/speech-to-speech-evi/configs/create-config). - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - display-name: Create prompt - request: - name: PostedPrompt - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. - text: - type: string - docs: >- - Instructions used to shape EVI's behavior, responses, and style. - - - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). - version_description: - type: optional - docs: An optional description of the Prompt version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - request: - name: Weather Assistant Prompt - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if severe - weather is expected. 
- response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: null - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - list-prompt-versions: - path: /v0/evi/prompts/{id} - method: GET - docs: >- - Fetches a list of a **Prompt's** versions. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: List prompt versions - request: - name: PromptsListPromptVersionsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each prompt. To include all versions of - each prompt in the list, set `restrict_to_most_recent` to false. 
- response: - docs: Success - type: root.ReturnPagedPrompts - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - create-prompt-version: - path: /v0/evi/prompts/{id} - method: POST - docs: >- - Updates a **Prompt** by creating a new version of the **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Create prompt version - request: - name: PostedPromptVersion - body: - properties: - text: - type: string - docs: >- - Instructions used to shape EVI's behavior, responses, and style - for this version of the Prompt. - - - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/speech-to-speech-evi/guides/prompting). 
- version_description: - type: optional - docs: An optional description of the Prompt version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather information. - Respond to user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, precipitation, - wind conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - version_description: This is an updated version of the Weather Assistant Prompt. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version of the Weather Assistant Prompt. - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722635140150 - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather - information. Respond to user queries concisely and clearly. Use - simple language and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. Include - helpful tips if severe weather is expected. - delete-prompt: - path: /v0/evi/prompts/{id} - method: DELETE - docs: >- - Deletes a **Prompt** and its versions. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. 
- display-name: Delete prompt - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - update-prompt-name: - path: /v0/evi/prompts/{id} - method: PATCH - docs: >- - Updates the name of a **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Update prompt name - request: - name: PostedPromptName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. - content-type: application/json - response: - docs: Success - type: text - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - name: Updated Weather Assistant Prompt Name - get-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: GET - docs: >- - Fetches a specified version of a **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. 
- display-name: Get prompt version - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: '' - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - delete-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: DELETE - docs: >- - Deletes a specified version of a **Prompt**. - - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Delete prompt version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - update-prompt-description: - path: /v0/evi/prompts/{id}/version/{version} - method: PATCH - docs: >- - Updates the description of a **Prompt**. 
- - - See our [prompting - guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on - crafting your system prompt. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Update prompt description - request: - name: PostedPromptVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Prompt version. - content-type: application/json - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - request: - version_description: This is an updated version_description. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version_description. - name: string - created_on: 1722633247488 - modified_on: 1722634770585 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. 
- source: - openapi: evi-openapi.json diff --git a/.mock/definition/empathic-voice/tools.yml b/.mock/definition/empathic-voice/tools.yml deleted file mode 100644 index 646e997e..00000000 --- a/.mock/definition/empathic-voice/tools.yml +++ /dev/null @@ -1,617 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list-tools: - path: /v0/evi/tools - method: GET - docs: >- - Fetches a paginated list of **Tools**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - pagination: - offset: $request.page_number - results: $response.tools_page - source: - openapi: evi-openapi.json - display-name: List tools - request: - name: ToolsListToolsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include tools with name. 
- response: - docs: Success - type: root.ReturnPagedUserDefinedTools - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: d20827af-5d8d-4f66-b6b9-ce2e3e1ea2b2 - version: 0 - version_type: FIXED - version_description: Fetches user's current location. - name: get_current_location - created_on: 1715267200693 - modified_on: 1715267200693 - fallback_content: Unable to fetch location. - description: Fetches user's current location. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }}, "required": ["location"] } - - tool_type: FUNCTION - id: 4442f3ea-9038-40e3-a2ce-1522b7de770f - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - name: get_current_weather - created_on: 1715266126705 - modified_on: 1715266126705 - fallback_content: Unable to fetch location. - description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature - unit to use. Infer this from the users location." } }, - "required": ["location", "format"] } - create-tool: - path: /v0/evi/tools - method: POST - docs: >- - Creates a **Tool** that can be added to an [EVI - configuration](/reference/speech-to-speech-evi/configs/create-config). - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. 
- source: - openapi: evi-openapi.json - display-name: Create tool - request: - name: PostedUserDefinedTool - body: - properties: - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - name: - type: string - docs: Name applied to all versions of a particular Tool. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. - - - These parameters define the inputs needed for the Tool's - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - version_description: - type: optional - docs: An optional description of the Tool version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - request: - name: get_current_weather - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit"], "description": "The temperature unit to use. Infer - this from the users location." } }, "required": ["location", - "format"] } - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - description: This tool is for getting the current weather. - fallback_content: Unable to fetch current weather. 
- response: - body: - tool_type: FUNCTION - id: aa9b71c4-723c-47ff-9f83-1a1829e74376 - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - name: get_current_weather - created_on: 1715275452390 - modified_on: 1715275452390 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature unit - to use. Infer this from the users location." } }, "required": - ["location", "format"] } - list-tool-versions: - path: /v0/evi/tools/{id} - method: GET - docs: >- - Fetches a list of a **Tool's** versions. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - pagination: - offset: $request.page_number - results: $response.tools_page - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: List tool versions - request: - name: ToolsListToolVersionsRequest - query-parameters: - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. 
- - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedUserDefinedTools - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or - kelvin based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users - location." } }, "required": ["location", "format"] } - create-tool-version: - path: /v0/evi/tools/{id} - method: POST - docs: >- - Updates a **Tool** by creating a new version of the **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. 
- display-name: Create tool version - request: - name: PostedUserDefinedToolVersion - body: - properties: - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. - - - These parameters define the inputs needed for the Tool's - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - version_description: - type: optional - docs: An optional description of the Tool version. - content-type: application/json - response: - docs: Created - type: optional - status-code: 201 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit", "kelvin"], "description": "The temperature unit to - use. Infer this from the users location." } }, "required": - ["location", "format"] } - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. 
- response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool: - path: /v0/evi/tools/{id} - method: DELETE - docs: >- - Deletes a **Tool** and its versions. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Delete tool - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - update-tool-name: - path: /v0/evi/tools/{id} - method: PATCH - docs: >- - Updates the name of a **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Update tool name - request: - name: PostedUserDefinedToolName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Tool. 
- content-type: application/json - response: - docs: Success - type: text - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - name: get_current_temperature - get-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: GET - docs: >- - Fetches a specified version of a **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Get tool version - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. 
San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: DELETE - docs: >- - Deletes a specified version of a **Tool**. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Delete tool version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - update-tool-description: - path: /v0/evi/tools/{id}/version/{version} - method: PATCH - docs: >- - Updates the description of a specified **Tool** version. - - - Refer to our [tool - use](/docs/speech-to-speech-evi/features/tool-use#function-calling) - guide for comprehensive instructions on defining and integrating tools - into EVI. - source: - openapi: evi-openapi.json - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. 
This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Update tool description - request: - name: PostedUserDefinedToolVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Tool version. - content-type: application/json - response: - docs: Success - type: optional - status-code: 200 - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - request: - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. - name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." 
} - }, "required": ["location", "format"] } - source: - openapi: evi-openapi.json diff --git a/.mock/definition/expression-measurement/__package__.yml b/.mock/definition/expression-measurement/__package__.yml deleted file mode 100644 index 0967ef42..00000000 --- a/.mock/definition/expression-measurement/__package__.yml +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/.mock/definition/expression-measurement/batch/__package__.yml b/.mock/definition/expression-measurement/batch/__package__.yml deleted file mode 100644 index 2d224469..00000000 --- a/.mock/definition/expression-measurement/batch/__package__.yml +++ /dev/null @@ -1,1799 +0,0 @@ -service: - auth: false - base-path: '' - endpoints: - list-jobs: - path: /v0/batch/jobs - method: GET - docs: Sort and filter jobs. - source: - openapi: batch-openapi.json - display-name: List jobs - request: - name: BatchListJobsRequest - query-parameters: - limit: - type: optional - default: 50 - docs: The maximum number of jobs to include in the response. - status: - type: optional - allow-multiple: true - docs: >- - Include only jobs of this status in the response. There are four - possible statuses: - - - - `QUEUED`: The job has been received and is waiting to be - processed. - - - - `IN_PROGRESS`: The job is currently being processed. - - - - `COMPLETED`: The job has finished processing. - - - - `FAILED`: The job encountered an error and could not be - completed successfully. - when: - type: optional - docs: >- - Specify whether to include jobs created before or after a given - `timestamp_ms`. - timestamp_ms: - type: optional - default: 1704319392247 - docs: |- - Provide a timestamp in milliseconds to filter jobs. - - When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. - sort_by: - type: optional - docs: >- - Specify which timestamp to sort the jobs by. 
- - - - `created`: Sort jobs by the time of creation, indicated by - `created_timestamp_ms`. - - - - `started`: Sort jobs by the time processing started, indicated - by `started_timestamp_ms`. - - - - `ended`: Sort jobs by the time processing ended, indicated by - `ended_timestamp_ms`. - direction: - type: optional - docs: >- - Specify the order in which to sort the jobs. Defaults to - descending order. - - - - `asc`: Sort in ascending order (chronological, with the oldest - records first). - - - - `desc`: Sort in descending order (reverse-chronological, with - the newest records first). - response: - docs: '' - type: list - status-code: 200 - examples: - - response: - body: - - job_id: job_id - request: - callback_url: null - files: - - filename: filename - md5sum: md5sum - content_type: content_type - models: - burst: {} - face: - descriptions: null - facs: null - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - sentiment: null - toxicity: null - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - window: null - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712587158717 - ended_timestamp_ms: 1712587159274 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712587158800 - status: COMPLETED - type: INFERENCE - start-inference-job: - path: /v0/batch/jobs - method: POST - docs: Start a new measurement inference job. 
- source: - openapi: batch-openapi.json - display-name: Start inference job - request: - body: InferenceBaseRequest - content-type: application/json - response: - docs: '' - type: JobId - status-code: 200 - property: job_id - examples: - - request: - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - notify: true - response: - body: - job_id: job_id - get-job-details: - path: /v0/batch/jobs/{id} - method: GET - docs: Get the request details and state of a given job. - source: - openapi: batch-openapi.json - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job details - response: - docs: '' - type: UnionJob - status-code: 200 - examples: - - name: Inference - path-parameters: - id: job_id - response: - body: - type: INFERENCE - job_id: job_id - request: - callback_url: null - files: [] - models: - burst: {} - face: - descriptions: null - facs: null - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - sentiment: null - toxicity: null - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - window: null - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712590457884 - ended_timestamp_ms: 1712590462252 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712590457995 - status: COMPLETED - get-job-predictions: - path: /v0/batch/jobs/{id}/predictions - method: GET - docs: Get the JSON predictions of a completed inference job. - source: - openapi: batch-openapi.json - path-parameters: - id: - type: string - docs: The unique identifier for the job. 
- display-name: Get job predictions - response: - docs: '' - type: list - status-code: 200 - examples: - - path-parameters: - id: job_id - response: - body: - - source: - type: url - url: https://hume-tutorials.s3.amazonaws.com/faces.zip - results: - predictions: - - file: faces/100.jpg - models: - face: - metadata: null - grouped_predictions: - - id: unknown - predictions: - - frame: 0 - time: 0 - prob: 0.9994111061096191 - box: - x: 1187.885986328125 - 'y': 1397.697509765625 - w: 1401.668701171875 - h: 1961.424560546875 - emotions: - - name: Admiration - score: 0.10722749680280685 - - name: Adoration - score: 0.06395940482616425 - - name: Aesthetic Appreciation - score: 0.05811462551355362 - - name: Amusement - score: 0.14187128841876984 - - name: Anger - score: 0.02804684266448021 - - name: Anxiety - score: 0.2713485360145569 - - name: Awe - score: 0.33812594413757324 - - name: Awkwardness - score: 0.1745193600654602 - - name: Boredom - score: 0.23600080609321594 - - name: Calmness - score: 0.18988418579101562 - - name: Concentration - score: 0.44288986921310425 - - name: Confusion - score: 0.39346569776535034 - - name: Contemplation - score: 0.31002455949783325 - - name: Contempt - score: 0.048870109021663666 - - name: Contentment - score: 0.0579497292637825 - - name: Craving - score: 0.06544201076030731 - - name: Desire - score: 0.05526508390903473 - - name: Determination - score: 0.08590991795063019 - - name: Disappointment - score: 0.19508258998394012 - - name: Disgust - score: 0.031529419124126434 - - name: Distress - score: 0.23210826516151428 - - name: Doubt - score: 0.3284550905227661 - - name: Ecstasy - score: 0.040716782212257385 - - name: Embarrassment - score: 0.1467227339744568 - - name: Empathic Pain - score: 0.07633581757545471 - - name: Entrancement - score: 0.16245244443416595 - - name: Envy - score: 0.03267110139131546 - - name: Excitement - score: 0.10656816512346268 - - name: Fear - score: 0.3115977346897125 - - name: Guilt - score: 
0.11615975946187973 - - name: Horror - score: 0.19795553386211395 - - name: Interest - score: 0.3136432468891144 - - name: Joy - score: 0.06285581737756729 - - name: Love - score: 0.06339752674102783 - - name: Nostalgia - score: 0.05866732448339462 - - name: Pain - score: 0.07684041559696198 - - name: Pride - score: 0.026822954416275024 - - name: Realization - score: 0.30000734329223633 - - name: Relief - score: 0.04414166510105133 - - name: Romance - score: 0.042728863656520844 - - name: Sadness - score: 0.14773206412792206 - - name: Satisfaction - score: 0.05902980640530586 - - name: Shame - score: 0.08103451132774353 - - name: Surprise (negative) - score: 0.25518184900283813 - - name: Surprise (positive) - score: 0.28845661878585815 - - name: Sympathy - score: 0.062488824129104614 - - name: Tiredness - score: 0.1559651643037796 - - name: Triumph - score: 0.01955239288508892 - facs: null - descriptions: null - errors: [] - get-job-artifacts: - path: /v0/batch/jobs/{id}/artifacts - method: GET - docs: Get the artifacts ZIP of a completed inference job. - source: - openapi: batch-openapi.json - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job artifacts - response: - docs: '' - type: file - status-code: 200 - start-inference-job-from-local-file: - path: /v0/batch/jobs - method: POST - auth: - - BearerAuth: [] - docs: Start a new batch inference job. - source: - openapi: batch-files-openapi.yml - display-name: Start inference job from local file - request: - name: BatchStartInferenceJobFromLocalFileRequest - body: - properties: - json: - type: optional - docs: >- - Stringified JSON object containing the inference job - configuration. - file: - type: list - docs: >- - Local media files (see recommended input filetypes) to be - processed. - - - If you wish to supply more than 100 files, consider providing - them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). 
- content-type: multipart/form-data - response: - docs: '' - type: JobId - status-code: 200 - property: job_id - examples: - - request: {} - response: - body: - job_id: job_id - source: - openapi: batch-files-openapi.yml -types: - Alternative: literal<"language_only"> - Bcp47Tag: - enum: - - zh - - da - - nl - - en - - value: en-AU - name: EnAu - - value: en-IN - name: EnIn - - value: en-NZ - name: EnNz - - value: en-GB - name: EnGb - - fr - - value: fr-CA - name: FrCa - - de - - hi - - value: hi-Latn - name: HiLatn - - id - - it - - ja - - ko - - 'no' - - pl - - pt - - value: pt-BR - name: PtBr - - value: pt-PT - name: PtPt - - ru - - es - - value: es-419 - name: Es419 - - sv - - ta - - tr - - uk - source: - openapi: batch-files-openapi.yml - BoundingBox: - docs: A bounding box around a face. - properties: - x: - type: double - docs: x-coordinate of bounding box top left corner. - 'y': - type: double - docs: y-coordinate of bounding box top left corner. - w: - type: double - docs: Bounding box width. - h: - type: double - docs: Bounding box height. - source: - openapi: batch-openapi.json - BurstPrediction: - properties: - time: TimeInterval - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - descriptions: - docs: Modality-specific descriptive features and their scores. - type: list - source: - openapi: batch-openapi.json - Classification: map - CompletedEmbeddingGeneration: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - CompletedInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). 
- started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. - source: - openapi: batch-openapi.json - CompletedTlInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. - source: - openapi: batch-openapi.json - CompletedTraining: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). 
- custom_model: TrainingCustomModel - alternatives: optional> - source: - openapi: batch-openapi.json - CustomModelPrediction: - properties: - output: map - error: string - task_type: string - source: - openapi: batch-openapi.json - CustomModelRequest: - properties: - name: string - description: optional - tags: optional> - source: - openapi: batch-openapi.json - Dataset: - discriminated: false - union: - - DatasetId - - DatasetVersionId - source: - openapi: batch-openapi.json - DatasetId: - properties: - id: - type: string - validation: - format: uuid - source: - openapi: batch-openapi.json - DatasetVersionId: - properties: - version_id: - type: string - validation: - format: uuid - source: - openapi: batch-openapi.json - DescriptionsScore: - properties: - name: - type: string - docs: Name of the descriptive feature being expressed. - score: - type: float - docs: Embedding value for the descriptive feature being expressed. - source: - openapi: batch-openapi.json - Direction: - enum: - - asc - - desc - source: - openapi: batch-openapi.json - EmbeddingGenerationBaseRequest: - properties: - registry_file_details: - type: optional> - docs: File ID and File URL pairs for an asset registry file - source: - openapi: batch-openapi.json - EmotionScore: - properties: - name: - type: string - docs: Name of the emotion being expressed. - score: - type: float - docs: Embedding value for the emotion being expressed. - source: - openapi: batch-openapi.json - Error: - properties: - message: - type: string - docs: An error message. - file: - type: string - docs: A file path relative to the top level source URL or file. - source: - openapi: batch-openapi.json - EvaluationArgs: - properties: - validation: optional - source: - openapi: batch-openapi.json - Face: - docs: >- - The Facial Emotional Expression model analyzes human facial expressions in - images and videos. Results will be provided per frame for video files. 
- - - Recommended input file types: `.png`, `.jpeg`, `.mp4` - properties: - fps_pred: - type: optional - docs: >- - Number of frames per second to process. Other frames will be omitted - from the response. Set to `0` to process every frame. - default: 3 - prob_threshold: - type: optional - docs: >- - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 0.99 - validation: - min: 0 - max: 1 - identify_faces: - type: optional - docs: >- - Whether to return identifiers for faces across frames. If `true`, - unique identifiers will be assigned to face bounding boxes to - differentiate different faces. If `false`, all faces will be tagged - with an `unknown` ID. - default: false - min_face_size: - type: optional - docs: >- - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. - facs: optional - descriptions: optional - save_faces: - type: optional - docs: >- - Whether to extract and save the detected faces in the artifacts zip - created by each job. - default: false - source: - openapi: batch-files-openapi.yml - FacePrediction: - properties: - frame: - type: uint64 - docs: Frame number - time: - type: double - docs: Time in seconds when face detection occurred. - prob: - type: double - docs: The predicted probability that a detected face was actually a face. - box: BoundingBox - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - facs: - type: optional> - docs: FACS 2.0 features and their scores. - descriptions: - type: optional> - docs: Modality-specific descriptive features and their scores. - source: - openapi: batch-openapi.json - FacemeshPrediction: - properties: - emotions: - docs: A high-dimensional embedding in emotion space. 
- type: list - source: - openapi: batch-openapi.json - FacsScore: - properties: - name: - type: string - docs: Name of the FACS 2.0 feature being expressed. - score: - type: float - docs: Embedding value for the FACS 2.0 feature being expressed. - source: - openapi: batch-openapi.json - Failed: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - message: - type: string - docs: An error message. - source: - openapi: batch-openapi.json - File: - docs: The list of files submitted for analysis. - properties: - filename: - type: optional - docs: The name of the file. - content_type: - type: optional - docs: The content type of the file. - md5sum: - type: string - docs: The MD5 checksum of the file. - source: - openapi: batch-openapi.json - Granularity: - enum: - - word - - sentence - - utterance - - conversational_turn - docs: >- - The granularity at which to generate predictions. The `granularity` field - is ignored if transcription is not enabled or if the `window` field has - been set. - - - - `word`: At the word level, our model provides a separate output for each - word, offering the most granular insight into emotional expression during - speech. - - - - `sentence`: At the sentence level of granularity, we annotate the - emotional tone of each spoken sentence with our Prosody and Emotional - Language models. - - - - `utterance`: Utterance-level granularity is between word- and - sentence-level. It takes into account natural pauses or breaks in speech, - providing more rapidly updated measures of emotional expression within a - flowing conversation. For text inputs, utterance-level granularity will - produce results identical to sentence-level granularity. 
- - - - `conversational_turn`: Conversational turn-level granularity provides a - distinct output for each change in speaker. It captures the full sequence - of words and sentences spoken uninterrupted by each person. This approach - provides a higher-level view of the emotional dynamics in a - multi-participant dialogue. For text inputs, specifying conversational - turn-level granularity for our Emotional Language model will produce - results for the entire passage. - source: - openapi: batch-files-openapi.yml - GroupedPredictionsBurstPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsFacePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsFacemeshPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsLanguagePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. 
- predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsNerPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsProsodyPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - InProgress: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - InferenceBaseRequest: - properties: - models: - type: optional - docs: >- - Specify the models to use for inference. - - - If this field is not explicitly set, then all models will run by - default. - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: >- - Text supplied directly to our Emotional Language and NER models for - analysis. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. 
- notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - source: - openapi: batch-files-openapi.yml - InferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - models: ModelsPredictions - source: - openapi: batch-openapi.json - InferenceRequest: - properties: - models: optional - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: Text to supply directly to our language and NER models. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - files: list - source: - openapi: batch-openapi.json - InferenceResults: - properties: - predictions: list - errors: list - source: - openapi: batch-openapi.json - InferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. - source: - openapi: batch-openapi.json - JobEmbeddingGeneration: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: EmbeddingGenerationBaseRequest - state: StateEmbeddingGeneration - source: - openapi: batch-openapi.json - JobInference: - properties: - job_id: - type: string - docs: The ID associated with this job. 
- validation: - format: uuid - request: - type: InferenceRequest - docs: The request that initiated the job. - state: - type: StateInference - docs: The current state of the job. - source: - openapi: batch-openapi.json - JobTlInference: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TlInferenceBaseRequest - state: StateTlInference - source: - openapi: batch-openapi.json - JobTraining: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TrainingBaseRequest - state: StateTraining - source: - openapi: batch-openapi.json - JobId: - properties: - job_id: - type: string - docs: The ID of the started job. - validation: - format: uuid - source: - openapi: batch-files-openapi.yml - Language: - docs: >- - The Emotional Language model analyzes passages of text. This also supports - audio and video files by transcribing and then directly analyzing the - transcribed text. - - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - granularity: optional - sentiment: optional - toxicity: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-files-openapi.yml - LanguagePrediction: - properties: - text: - type: string - docs: A segment of text (like a word or a sentence). - position: PositionInterval - time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. 
- speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - sentiment: - type: optional> - docs: >- - Sentiment predictions returned as a distribution. This model predicts - the probability that a given text could be interpreted as having each - sentiment level from `1` (negative) to `9` (positive). - - - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. For example, a text with very - neutral sentiment would have an average rating of `5`. But also a text - that could be interpreted as having very positive sentiment or very - negative sentiment would also have an average rating of `5`. The - average sentiment is less informative than the distribution over - sentiment, so this API returns a value for each sentiment level. - toxicity: - type: optional> - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: `toxic`, `severe_toxic`, - `obscene`, `threat`, `insult`, and `identity_hate`. - source: - openapi: batch-openapi.json - Models: - docs: The models used for inference. - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: batch-files-openapi.yml - ModelsPredictions: - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: batch-openapi.json - Ner: - docs: >- - The NER (Named-entity Recognition) model identifies real-world objects and - concepts in passages of text. This also supports audio and video files by - transcribing and then directly analyzing the transcribed text. 
- - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-files-openapi.yml - NerPrediction: - properties: - entity: - type: string - docs: The recognized topic or entity. - position: PositionInterval - entity_confidence: - type: double - docs: Our NER model's relative confidence in the recognized topic or entity. - support: - type: double - docs: A measure of how often the entity is linked to by other entities. - uri: - type: string - docs: >- - A URL which provides more information about the recognized topic or - entity. - link_word: - type: string - docs: The specific word to which the emotion predictions are linked. - time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - 'Null': - type: map - docs: No associated metadata for this model. Value will be `null`. - PositionInterval: - docs: >- - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. - properties: - begin: - type: uint64 - docs: The index of the first character in the text segment, inclusive. - end: - type: uint64 - docs: The index of the last character in the text segment, exclusive. 
- source: - openapi: batch-openapi.json - PredictionsOptionalNullBurstPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalNullFacePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalNullFacemeshPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataLanguagePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataNerPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataProsodyPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - Prosody: - docs: >- - The Speech Prosody model analyzes the intonation, stress, and rhythm of - spoken word. - - - Recommended input file types: `.wav`, `.mp3`, `.mp4` - properties: - granularity: optional - window: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-files-openapi.yml - ProsodyPrediction: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). - time: TimeInterval - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. 
- speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - Queued: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - RegistryFileDetail: - properties: - file_id: - type: string - docs: File ID in the Asset Registry - file_url: - type: string - docs: URL to the file in the Asset Registry - source: - openapi: batch-openapi.json - Regression: map - SentimentScore: - properties: - name: - type: string - docs: Level of sentiment, ranging from `1` (negative) to `9` (positive) - score: - type: float - docs: Prediction for this level of sentiment - source: - openapi: batch-openapi.json - SortBy: - enum: - - created - - started - - ended - source: - openapi: batch-openapi.json - Source: - discriminant: type - base-properties: {} - union: - url: SourceUrl - file: SourceFile - text: SourceTextSource - source: - openapi: batch-openapi.json - SourceFile: - properties: {} - extends: - - File - source: - openapi: batch-openapi.json - SourceTextSource: - properties: {} - source: - openapi: batch-openapi.json - SourceUrl: - properties: {} - extends: - - Url - source: - openapi: batch-openapi.json - Url: - properties: - url: - type: string - docs: The URL of the source media file. 
- source: - openapi: batch-openapi.json - StateEmbeddingGeneration: - discriminant: status - base-properties: {} - union: - QUEUED: StateEmbeddingGenerationQueued - IN_PROGRESS: StateEmbeddingGenerationInProgress - COMPLETED: StateEmbeddingGenerationCompletedEmbeddingGeneration - FAILED: StateEmbeddingGenerationFailed - source: - openapi: batch-openapi.json - StateEmbeddingGenerationCompletedEmbeddingGeneration: - properties: {} - extends: - - CompletedEmbeddingGeneration - source: - openapi: batch-openapi.json - StateEmbeddingGenerationFailed: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - StateEmbeddingGenerationInProgress: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - StateEmbeddingGenerationQueued: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - StateInference: - discriminant: status - base-properties: {} - union: - QUEUED: QueuedState - IN_PROGRESS: InProgressState - COMPLETED: CompletedState - FAILED: FailedState - source: - openapi: batch-openapi.json - CompletedState: - properties: {} - extends: - - CompletedInference - source: - openapi: batch-openapi.json - FailedState: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - InProgressState: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - QueuedState: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - StateTlInference: - discriminant: status - base-properties: {} - union: - QUEUED: StateTlInferenceQueued - IN_PROGRESS: StateTlInferenceInProgress - COMPLETED: StateTlInferenceCompletedTlInference - FAILED: StateTlInferenceFailed - source: - openapi: batch-openapi.json - StateTlInferenceCompletedTlInference: - properties: {} - extends: - - CompletedTlInference - source: - openapi: batch-openapi.json - StateTlInferenceFailed: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - 
StateTlInferenceInProgress: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - StateTlInferenceQueued: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - StateTraining: - discriminant: status - base-properties: {} - union: - QUEUED: StateTrainingQueued - IN_PROGRESS: StateTrainingInProgress - COMPLETED: StateTrainingCompletedTraining - FAILED: StateTrainingFailed - source: - openapi: batch-openapi.json - StateTrainingCompletedTraining: - properties: {} - extends: - - CompletedTraining - source: - openapi: batch-openapi.json - StateTrainingFailed: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - StateTrainingInProgress: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - StateTrainingQueued: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - Status: - enum: - - QUEUED - - IN_PROGRESS - - COMPLETED - - FAILED - source: - openapi: batch-openapi.json - TlInferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - file_type: string - custom_models: map - source: - openapi: batch-openapi.json - TlInferenceResults: - properties: - predictions: list - errors: list - source: - openapi: batch-openapi.json - TlInferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. 
- source: - openapi: batch-openapi.json - Tag: - properties: - key: string - value: string - source: - openapi: batch-openapi.json - Target: - discriminated: false - union: - - long - - double - - string - source: - openapi: batch-openapi.json - Task: - discriminant: type - base-properties: {} - union: - classification: TaskClassification - regression: TaskRegression - source: - openapi: batch-openapi.json - TaskClassification: - properties: {} - source: - openapi: batch-openapi.json - TaskRegression: - properties: {} - source: - openapi: batch-openapi.json - TextSource: map - TimeInterval: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: double - docs: Beginning of time range in seconds. - end: - type: double - docs: End of time range in seconds. - source: - openapi: batch-openapi.json - TlInferenceBaseRequest: - properties: - custom_model: CustomModel - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. 
- default: false - source: - openapi: batch-openapi.json - CustomModel: - discriminated: false - union: - - CustomModelId - - CustomModelVersionId - source: - openapi: batch-openapi.json - CustomModelId: - properties: - id: string - source: - openapi: batch-openapi.json - CustomModelVersionId: - properties: - version_id: string - source: - openapi: batch-openapi.json - ToxicityScore: - properties: - name: - type: string - docs: Category of toxicity. - score: - type: float - docs: Prediction for this category of toxicity - source: - openapi: batch-openapi.json - TrainingBaseRequest: - properties: - custom_model: CustomModelRequest - dataset: Dataset - target_feature: - type: optional - default: label - task: optional - evaluation: optional - alternatives: optional> - callback_url: optional - notify: - type: optional - default: false - source: - openapi: batch-openapi.json - TrainingCustomModel: - properties: - id: string - version_id: optional - source: - openapi: batch-openapi.json - Transcription: - docs: |- - Transcription-related configuration options. - - To disable transcription, explicitly set this field to `null`. - properties: - language: - type: optional - docs: >- - By default, we use an automated language detection method for our - Speech Prosody, Language, and NER models. However, if you know what - language is being spoken in your media samples, you can specify it via - its BCP-47 tag and potentially obtain more accurate results. 
- - - You can specify any of the following languages: - - - Chinese: `zh` - - - Danish: `da` - - - Dutch: `nl` - - - English: `en` - - - English (Australia): `en-AU` - - - English (India): `en-IN` - - - English (New Zealand): `en-NZ` - - - English (United Kingdom): `en-GB` - - - French: `fr` - - - French (Canada): `fr-CA` - - - German: `de` - - - Hindi: `hi` - - - Hindi (Roman Script): `hi-Latn` - - - Indonesian: `id` - - - Italian: `it` - - - Japanese: `ja` - - - Korean: `ko` - - - Norwegian: `no` - - - Polish: `pl` - - - Portuguese: `pt` - - - Portuguese (Brazil): `pt-BR` - - - Portuguese (Portugal): `pt-PT` - - - Russian: `ru` - - - Spanish: `es` - - - Spanish (Latin America): `es-419` - - - Swedish: `sv` - - - Tamil: `ta` - - - Turkish: `tr` - - - Ukrainian: `uk` - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - confidence_threshold: - type: optional - docs: >- - Transcript confidence threshold. Transcripts generated with a - confidence less than this threshold will be considered invalid and not - used as an input for model inference. - default: 0.5 - validation: - min: 0 - max: 1 - source: - openapi: batch-files-openapi.yml - TranscriptionMetadata: - docs: Transcription metadata for your media file. - properties: - confidence: - type: double - docs: >- - Value between `0.0` and `1.0` indicating our transcription model's - relative confidence in the transcription of your media file. - detected_language: optional - source: - openapi: batch-openapi.json - Type: - enum: - - EMBEDDING_GENERATION - - INFERENCE - - TL_INFERENCE - - TRAINING - source: - openapi: batch-openapi.json - Unconfigurable: - type: map - docs: >- - To include predictions for this model type, set this field to `{}`. 
It is - currently not configurable further. - UnionJob: InferenceJob - EmbeddingGenerationJob: - properties: - type: string - extends: - - JobEmbeddingGeneration - source: - openapi: batch-openapi.json - InferenceJob: - properties: - type: - type: string - docs: >- - Denotes the job type. - - - Jobs created with the Expression Measurement API will have this field - set to `INFERENCE`. - extends: - - JobInference - source: - openapi: batch-openapi.json - CustomModelsInferenceJob: - properties: - type: string - extends: - - JobTlInference - source: - openapi: batch-openapi.json - CustomModelsTrainingJob: - properties: - type: string - extends: - - JobTraining - source: - openapi: batch-openapi.json - UnionPredictResult: InferenceSourcePredictResult - ValidationArgs: - properties: - positive_label: optional - source: - openapi: batch-openapi.json - When: - enum: - - created_before - - created_after - source: - openapi: batch-openapi.json - Window: - docs: >- - Generate predictions based on time. - - - Setting the `window` field allows for a 'sliding window' approach, where a - fixed-size window moves across the audio or video file in defined steps. - This enables continuous analysis of prosody within subsets of the file, - providing dynamic and localized insights into emotional expression. - properties: - length: - type: optional - docs: The length of the sliding window. - default: 4 - validation: - min: 0.5 - step: - type: optional - docs: The step size of the sliding window. - default: 1 - validation: - min: 0.5 - source: - openapi: batch-files-openapi.yml diff --git a/.mock/definition/expression-measurement/stream/__package__.yml b/.mock/definition/expression-measurement/stream/__package__.yml deleted file mode 100644 index 94df9784..00000000 --- a/.mock/definition/expression-measurement/stream/__package__.yml +++ /dev/null @@ -1,113 +0,0 @@ -types: - EmotionEmbeddingItem: - properties: - name: - type: optional - docs: Name of the emotion being expressed. 
- score: - type: optional - docs: Embedding value for the emotion being expressed. - source: - openapi: streaming-asyncapi.yml - EmotionEmbedding: - docs: A high-dimensional embedding in emotion space. - type: list - StreamBoundingBox: - docs: A bounding box around a face. - properties: - x: - type: optional - docs: x-coordinate of bounding box top left corner. - validation: - min: 0 - 'y': - type: optional - docs: y-coordinate of bounding box top left corner. - validation: - min: 0 - w: - type: optional - docs: Bounding box width. - validation: - min: 0 - h: - type: optional - docs: Bounding box height. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - TimeRange: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: optional - docs: Beginning of time range in seconds. - validation: - min: 0 - end: - type: optional - docs: End of time range in seconds. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - TextPosition: - docs: > - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. - properties: - begin: - type: optional - docs: The index of the first character in the text segment, inclusive. - validation: - min: 0 - end: - type: optional - docs: The index of the last character in the text segment, exclusive. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - SentimentItem: - properties: - name: - type: optional - docs: Level of sentiment, ranging from 1 (negative) to 9 (positive) - score: - type: optional - docs: Prediction for this level of sentiment - source: - openapi: streaming-asyncapi.yml - Sentiment: - docs: >- - Sentiment predictions returned as a distribution. This model predicts the - probability that a given text could be interpreted as having each - sentiment level from 1 (negative) to 9 (positive). 
- - - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. For example, a text with very - neutral sentiment would have an average rating of 5. But also a text that - could be interpreted as having very positive sentiment or very negative - sentiment would also have an average rating of 5. The average sentiment is - less informative than the distribution over sentiment, so this API returns - a value for each sentiment level. - type: list - ToxicityItem: - properties: - name: - type: optional - docs: Category of toxicity. - score: - type: optional - docs: Prediction for this category of toxicity - source: - openapi: streaming-asyncapi.yml - Toxicity: - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: toxic, severe_toxic, obscene, - threat, insult, and identity_hate. - type: list diff --git a/.mock/definition/expression-measurement/stream/stream.yml b/.mock/definition/expression-measurement/stream/stream.yml deleted file mode 100644 index d9c46dc8..00000000 --- a/.mock/definition/expression-measurement/stream/stream.yml +++ /dev/null @@ -1,437 +0,0 @@ -channel: - path: /models - url: stream - auth: false - headers: - X-Hume-Api-Key: - type: string - name: humeApiKey - messages: - publish: - origin: client - body: - type: StreamModelsEndpointPayload - docs: Models endpoint payload - subscribe: - origin: server - body: SubscribeEvent - examples: - - messages: - - type: publish - body: {} - - type: subscribe - body: {} -types: - StreamFace: - docs: > - Configuration for the facial expression emotion model. - - - Note: Using the `reset_stream` parameter does not have any effect on face - identification. A single face identifier cache is maintained over a full - session whether `reset_stream` is used or not. - properties: - facs: - type: optional> - docs: >- - Configuration for FACS predictions. 
If missing or null, no FACS - predictions will be generated. - descriptions: - type: optional> - docs: >- - Configuration for Descriptions predictions. If missing or null, no - Descriptions predictions will be generated. - identify_faces: - type: optional - docs: > - Whether to return identifiers for faces across frames. If true, unique - identifiers will be assigned to face bounding boxes to differentiate - different faces. If false, all faces will be tagged with an "unknown" - ID. - default: false - fps_pred: - type: optional - docs: > - Number of frames per second to process. Other frames will be omitted - from the response. - default: 3 - prob_threshold: - type: optional - docs: > - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 3 - min_face_size: - type: optional - docs: > - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. - default: 3 - source: - openapi: streaming-asyncapi.yml - inline: true - StreamLanguage: - docs: Configuration for the language emotion model. - properties: - sentiment: - type: optional> - docs: >- - Configuration for sentiment predictions. If missing or null, no - sentiment predictions will be generated. - toxicity: - type: optional> - docs: >- - Configuration for toxicity predictions. If missing or null, no - toxicity predictions will be generated. - granularity: - type: optional - docs: >- - The granularity at which to generate predictions. Values are `word`, - `sentence`, `utterance`, or `passage`. To get a single prediction for - the entire text of your streaming payload use `passage`. Default value - is `word`. - source: - openapi: streaming-asyncapi.yml - inline: true - Config: - docs: > - Configuration used to specify which models should be used and with what - settings. 
- properties: - burst: - type: optional> - docs: | - Configuration for the vocal burst emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - face: - type: optional - docs: > - Configuration for the facial expression emotion model. - - - Note: Using the `reset_stream` parameter does not have any effect on - face identification. A single face identifier cache is maintained over - a full session whether `reset_stream` is used or not. - facemesh: - type: optional> - docs: | - Configuration for the facemesh emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - language: - type: optional - docs: Configuration for the language emotion model. - prosody: - type: optional> - docs: | - Configuration for the speech prosody emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelsEndpointPayload: - docs: Models endpoint payload - properties: - data: optional - models: - type: optional - docs: > - Configuration used to specify which models should be used and with - what settings. - stream_window_ms: - type: optional - docs: > - Length in milliseconds of streaming sliding window. - - - Extending the length of this window will prepend media context from - past payloads into the current payload. - - - For example, if on the first payload you send 500ms of data and on the - second payload you send an additional 500ms of data, a window of at - least 1000ms will allow the model to process all 1000ms of stream - data. - - - A window of 600ms would append the full 500ms of the second payload to - the last 100ms of the first payload. 
- - - Note: This feature is currently only supported for audio data and - audio models. For other file types and models this parameter will be - ignored. - default: 5000 - validation: - min: 500 - max: 10000 - reset_stream: - type: optional - docs: > - Whether to reset the streaming sliding window before processing the - current payload. - - - If this parameter is set to `true` then past context will be deleted - before processing the current payload. - - - Use reset_stream when one audio file is done being processed and you - do not want context to leak across files. - default: false - raw_text: - type: optional - docs: > - Set to `true` to enable the data parameter to be parsed as raw text - rather than base64 encoded bytes. - - This parameter is useful if you want to send text to be processed by - the language model, but it cannot be used with other file types like - audio, image, or video. - default: false - job_details: - type: optional - docs: > - Set to `true` to get details about the job. - - - This parameter can be set in the same payload as data or it can be set - without data and models configuration to get the job details between - payloads. - - - This parameter is useful to get the unique job ID. - default: false - payload_id: - type: optional - docs: > - Pass an arbitrary string as the payload ID and get it back at the top - level of the socket response. - - - This can be useful if you have multiple requests running - asynchronously and want to disambiguate responses as they are - received. - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsJobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. 
- source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsBurstPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsBurst: - docs: Response for the vocal burst emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFacePredictionsItem: - properties: - frame: - type: optional - docs: Frame number - time: - type: optional - docs: Time in seconds when face detection occurred. - bbox: optional - prob: - type: optional - docs: The predicted probability that a detected face was actually a face. - face_id: - type: optional - docs: >- - Identifier for a face. Not that this defaults to `unknown` unless face - identification is enabled in the face model configuration. - emotions: optional - facs: optional - descriptions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFace: - docs: Response for the facial expression emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFacemeshPredictionsItem: - properties: - emotions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsFacemesh: - docs: Response for the facemesh emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsLanguagePredictionsItem: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). - position: optional - emotions: optional - sentiment: optional - toxicity: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsLanguage: - docs: Response for the language emotion model. 
- properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsProsodyPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictionsProsody: - docs: Response for the speech prosody emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - inline: true - StreamModelPredictions: - docs: Model predictions - properties: - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - burst: - type: optional - docs: Response for the vocal burst emotion model. - face: - type: optional - docs: Response for the facial expression emotion model. - facemesh: - type: optional - docs: Response for the facemesh emotion model. - language: - type: optional - docs: Response for the language emotion model. - prosody: - type: optional - docs: Response for the speech prosody emotion model. - source: - openapi: streaming-asyncapi.yml - inline: true - JobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamErrorMessage: - docs: Error message - properties: - error: - type: optional - docs: Error message text. - code: - type: optional - docs: Unique identifier for the error. - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. 
- job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamWarningMessageJobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - inline: true - StreamWarningMessage: - docs: Warning message - properties: - warning: - type: optional - docs: Warning message text. - code: - type: optional - docs: Unique identifier for the error. - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. 
- source: - openapi: streaming-asyncapi.yml - inline: true - SubscribeEvent: - discriminated: false - union: - - type: StreamModelPredictions - docs: Model predictions - - type: StreamErrorMessage - docs: Error message - - type: StreamWarningMessage - docs: Warning message - source: - openapi: streaming-asyncapi.yml -imports: - streamRoot: __package__.yml diff --git a/.mock/definition/tts/__package__.yml b/.mock/definition/tts/__package__.yml deleted file mode 100644 index 96e9aa75..00000000 --- a/.mock/definition/tts/__package__.yml +++ /dev/null @@ -1,938 +0,0 @@ -errors: - UnprocessableEntityError: - status-code: 422 - type: HTTPValidationError - docs: Validation Error - examples: - - value: {} - BadRequestError: - status-code: 400 - type: ErrorResponse - docs: Bad Request - examples: - - value: {} -service: - auth: false - base-path: '' - endpoints: - synthesize-json: - path: /v0/tts - method: POST - docs: >- - Synthesizes one or more input texts into speech using the specified - voice. If no voice is provided, a novel voice will be generated - dynamically. Optionally, additional context can be included to influence - the speech's style and prosody. - - - The response includes the base64-encoded audio and metadata in JSON - format. - source: - openapi: tts-openapi.json - display-name: Text-to-Speech (Json) - request: - body: - type: PostedTts - content-type: application/json - response: - docs: Successful Response - type: ReturnTts - status-code: 200 - errors: - - UnprocessableEntityError - examples: - - request: - context: - utterances: - - text: How can people see beauty so differently? - description: >- - A curious student with a clear and respectful tone, seeking - clarification on Hume's ideas with a straightforward - question. - format: - type: mp3 - num_generations: 1 - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. 
- description: >- - Middle-aged masculine voice with a clear, rhythmic Scots lilt, - rounded vowels, and a warm, steady tone with an articulate, - academic quality. - response: - body: - generations: - - audio: //PExAA0DDYRvkpNfhv3JI5JZ...etc. - duration: 7.44225 - encoding: - format: mp3 - sample_rate: 48000 - file_size: 120192 - generation_id: 795c949a-1510-4a80-9646-7d0863b023ab - snippets: - - - audio: //PExAA0DDYRvkpNfhv3JI5JZ...etc. - generation_id: 795c949a-1510-4a80-9646-7d0863b023ab - id: 37b1b1b1-1b1b-1b1b-1b1b-1b1b1b1b1b1b - text: >- - Beauty is no quality in things themselves: It exists - merely in the mind which contemplates them. - utterance_index: 0 - timestamps: [] - request_id: 66e01f90-4501-4aa0-bbaf-74f45dc15aa725906 - synthesize-file: - path: /v0/tts/file - method: POST - docs: >- - Synthesizes one or more input texts into speech using the specified - voice. If no voice is provided, a novel voice will be generated - dynamically. Optionally, additional context can be included to influence - the speech's style and prosody. - - - The response contains the generated audio file in the requested format. - source: - openapi: tts-openapi.json - display-name: Text-to-Speech (File) - request: - body: - type: PostedTts - content-type: application/json - response: - docs: OK - type: file - status-code: 200 - errors: - - UnprocessableEntityError - examples: - - request: - context: - generation_id: 09ad914d-8e7f-40f8-a279-e34f07f7dab2 - format: - type: mp3 - num_generations: 1 - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. - description: >- - Middle-aged masculine voice with a clear, rhythmic Scots lilt, - rounded vowels, and a warm, steady tone with an articulate, - academic quality. - synthesize-file-streaming: - path: /v0/tts/stream/file - method: POST - docs: >- - Streams synthesized speech using the specified voice. 
If no voice is - provided, a novel voice will be generated dynamically. Optionally, - additional context can be included to influence the speech's style and - prosody. - source: - openapi: tts-openapi.json - display-name: Text-to-Speech (Streamed File) - request: - body: - type: PostedTts - content-type: application/json - response: - docs: OK - type: file - status-code: 200 - errors: - - UnprocessableEntityError - examples: - - request: - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. - voice: - name: Male English Actor - provider: HUME_AI - synthesize-json-streaming: - path: /v0/tts/stream/json - method: POST - docs: >- - Streams synthesized speech using the specified voice. If no voice is - provided, a novel voice will be generated dynamically. Optionally, - additional context can be included to influence the speech's style and - prosody. - - - The response is a stream of JSON objects including audio encoded in - base64. - source: - openapi: tts-openapi.json - display-name: Text-to-Speech (Streamed JSON) - request: - body: - type: PostedTts - content-type: application/json - response-stream: - docs: Successful Response - type: TtsOutput - format: json - errors: - - UnprocessableEntityError - examples: - - request: - utterances: - - text: >- - Beauty is no quality in things themselves: It exists merely in - the mind which contemplates them. - voice: - name: Male English Actor - provider: HUME_AI - convertVoiceFile: - path: /v0/tts/voice_conversion/file - method: POST - source: - openapi: tts-openapi.json - display-name: Voice Conversion (Streamed File) - request: - name: ConvertVoiceFileRequest - body: - properties: - strip_headers: - type: optional - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. 
- Otherwise, if disabled, each chunk's audio will be its own audio - file, each with its own headers (if applicable). - audio: file - context: - type: optional - docs: >- - Utterances to use as context for generating consistent speech - style and prosody across multiple requests. These will not be - converted to speech output. - voice: optional - format: - type: optional - docs: Specifies the output audio file format. - include_timestamp_types: - type: optional> - docs: >- - The set of timestamp types to include in the response. When used - in multipart/form-data, specify each value using bracket - notation: - `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. - Only supported for Octave 2 requests. - content-type: multipart/form-data - response: - docs: Successful Response - type: file - status-code: 200 - errors: - - UnprocessableEntityError - convertVoiceJson: - path: /v0/tts/voice_conversion/json - method: POST - source: - openapi: tts-openapi.json - display-name: Voice Conversion (Streamed JSON) - request: - name: ConvertVoiceJsonRequest - body: - properties: - strip_headers: - type: optional - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. - Otherwise, if disabled, each chunk's audio will be its own audio - file, each with its own headers (if applicable). - audio: optional - context: - type: optional - docs: >- - Utterances to use as context for generating consistent speech - style and prosody across multiple requests. These will not be - converted to speech output. - voice: optional - format: - type: optional - docs: Specifies the output audio file format. - include_timestamp_types: - type: optional> - docs: >- - The set of timestamp types to include in the response. When used - in multipart/form-data, specify each value using bracket - notation: - `include_timestamp_types[0]=word&include_timestamp_types[1]=phoneme`. 
- Only supported for Octave 2 requests. - content-type: multipart/form-data - response-stream: - docs: Successful Response - type: TtsOutput - format: json - errors: - - UnprocessableEntityError - examples: - - request: {} - response: - stream: - - audio: audio - audio_format: mp3 - chunk_index: 1 - generation_id: generation_id - is_last_chunk: true - request_id: request_id - snippet: - audio: audio - generation_id: generation_id - id: id - text: text - timestamps: - - text: text - time: - begin: 1 - end: 1 - type: word - transcribed_text: transcribed_text - utterance_index: 1 - snippet_id: snippet_id - text: text - transcribed_text: transcribed_text - type: audio - utterance_index: 1 - source: - openapi: tts-openapi.json -types: - PostedContext: - discriminated: false - docs: >- - Utterances to use as context for generating consistent speech style and - prosody across multiple requests. These will not be converted to speech - output. - union: - - type: PostedContextWithGenerationId - - type: PostedContextWithUtterances - source: - openapi: tts-openapi.json - inline: true - Format: - discriminated: false - docs: Specifies the output audio file format. - union: - - type: FormatMp3 - - type: FormatPcm - - type: FormatWav - source: - openapi: tts-openapi.json - inline: true - AudioFormatType: - enum: - - mp3 - - pcm - - wav - source: - openapi: tts-openapi.json - PublishTts: - docs: Input message type for the TTS stream. - properties: - close: - type: optional - docs: Force the generation of audio and close the stream. - default: false - description: - type: optional - docs: >- - Natural language instructions describing how the text should be spoken - by the model (e.g., `"a soft, gentle voice with a strong British - accent"`). - validation: - maxLength: 1000 - flush: - type: optional - docs: >- - Force the generation of audio regardless of how much text has been - supplied. 
- default: false - speed: - type: optional - docs: A relative measure of how fast this utterance should be spoken. - default: 1 - validation: - min: 0.25 - max: 3 - text: - type: optional - docs: The input text to be converted to speech output. - default: '' - validation: - maxLength: 5000 - trailing_silence: - type: optional - docs: Duration of trailing silence (in seconds) to add to this utterance - default: 0 - validation: - min: 0 - max: 5 - voice: - type: optional - docs: >- - The name or ID of the voice from the `Voice Library` to be used as the - speaker for this and all subsequent utterances, until the `"voice"` - field is updated again. - source: - openapi: tts-asyncapi.json - MillisecondInterval: - properties: - begin: - type: integer - docs: Start time of the interval in milliseconds. - end: - type: integer - docs: End time of the interval in milliseconds. - source: - openapi: tts-openapi.json - TimestampMessage: - docs: A word or phoneme level timestamp for the generated audio. - properties: - generation_id: - type: string - docs: >- - The generation ID of the parent snippet that this chunk corresponds - to. - request_id: - type: string - docs: ID of the initiating request. - snippet_id: - type: string - docs: The ID of the parent snippet that this chunk corresponds to. - timestamp: - type: Timestamp - docs: A word or phoneme level timestamp for the generated audio. - type: literal<"timestamp"> - source: - openapi: tts-openapi.json - SnippetAudioChunk: - docs: Metadata for a chunk of generated audio. - properties: - audio: - type: string - docs: The generated audio output chunk in the requested format. - audio_format: - type: AudioFormatType - docs: The generated audio output format. - chunk_index: - type: integer - docs: The index of the audio chunk in the snippet. - generation_id: - type: string - docs: >- - The generation ID of the parent snippet that this chunk corresponds - to. 
- is_last_chunk: - type: boolean - docs: >- - Whether or not this is the last chunk streamed back from the decoder - for one input snippet. - request_id: - type: string - docs: ID of the initiating request. - snippet: optional - snippet_id: - type: string - docs: The ID of the parent snippet that this chunk corresponds to. - text: - type: string - docs: The text of the parent snippet that this chunk corresponds to. - transcribed_text: - type: optional - docs: >- - The transcribed text of the generated audio of the parent snippet that - this chunk corresponds to. It is only present if `instant_mode` is set - to `false`. - type: literal<"audio"> - utterance_index: - type: optional - docs: >- - The index of the utterance in the request that the parent snippet of - this chunk corresponds to. - source: - openapi: tts-openapi.json - Timestamp: - properties: - text: - type: string - docs: The word or phoneme text that the timestamp corresponds to. - time: - type: MillisecondInterval - docs: The start and end timestamps for the word or phoneme in milliseconds. - type: - type: TimestampType - source: - openapi: tts-openapi.json - TimestampType: - enum: - - word - - phoneme - source: - openapi: tts-openapi.json - PostedUtteranceVoiceWithId: - properties: - id: - type: string - docs: The unique ID associated with the **Voice**. - provider: - type: optional - docs: >- - Specifies the source provider associated with the chosen voice. - - - - **`HUME_AI`**: Select voices from Hume's [Voice - Library](https://platform.hume.ai/tts/voice-library), containing a - variety of preset, shared voices. - - - **`CUSTOM_VOICE`**: Select from voices you've personally generated - and saved in your account. - - - If no provider is explicitly set, the default provider is - `CUSTOM_VOICE`. When using voices from Hume's **Voice Library**, you - must explicitly set the provider to `HUME_AI`. - - - Preset voices from Hume's **Voice Library** are accessible by all - users. 
In contrast, your custom voices are private and accessible only - via requests authenticated with your API key. - source: - openapi: tts-openapi.json - PostedUtteranceVoiceWithName: - properties: - name: - type: string - docs: The name of a **Voice**. - provider: - type: optional - docs: >- - Specifies the source provider associated with the chosen voice. - - - - **`HUME_AI`**: Select voices from Hume's [Voice - Library](https://platform.hume.ai/tts/voice-library), containing a - variety of preset, shared voices. - - - **`CUSTOM_VOICE`**: Select from voices you've personally generated - and saved in your account. - - - If no provider is explicitly set, the default provider is - `CUSTOM_VOICE`. When using voices from Hume's **Voice Library**, you - must explicitly set the provider to `HUME_AI`. - - - Preset voices from Hume's **Voice Library** are accessible by all - users. In contrast, your custom voices are private and accessible only - via requests authenticated with your API key. - source: - openapi: tts-openapi.json - VoiceProvider: - enum: - - HUME_AI - - CUSTOM_VOICE - source: - openapi: tts-openapi.json - PostedUtteranceVoice: - discriminated: false - union: - - type: PostedUtteranceVoiceWithId - - type: PostedUtteranceVoiceWithName - source: - openapi: tts-openapi.json - OctaveVersion: - enum: - - value: '1' - name: One - - value: '2' - name: Two - docs: >- - Selects the Octave model version used to synthesize speech for this - request. If you omit this field, Hume automatically routes the request to - the most appropriate model. Setting a specific version ensures stable and - repeatable behavior across requests. - - - Use `2` to opt into the latest Octave capabilities. When you specify - version `2`, you must also provide a `voice`. Requests that set `version: - 2` without a voice will be rejected. - - - For a comparison of Octave versions, see the [Octave - versions](/docs/text-to-speech-tts/overview#octave-versions) section in - the TTS overview. 
- source: - openapi: tts-openapi.json - TtsOutput: - discriminated: false - union: - - type: SnippetAudioChunk - - type: TimestampMessage - source: - openapi: tts-openapi.json - Snippet: - properties: - audio: - type: string - docs: >- - The segmented audio output in the requested format, encoded as a - base64 string. - generation_id: - type: string - docs: The generation ID this snippet corresponds to. - id: - type: string - docs: A unique ID associated with this **Snippet**. - text: - type: string - docs: The text for this **Snippet**. - timestamps: - docs: >- - A list of word or phoneme level timestamps for the generated audio. - Timestamps are only returned for Octave 2 requests. - type: list - transcribed_text: - type: optional - docs: >- - The transcribed text of the generated audio. It is only present if - `instant_mode` is set to `false`. - utterance_index: - type: optional - docs: The index of the utterance in the request this snippet corresponds to. - source: - openapi: tts-openapi.json - PostedContextWithGenerationId: - properties: - generation_id: - type: string - docs: >- - The ID of a prior TTS generation to use as context for generating - consistent speech style and prosody across multiple requests. - Including context may increase audio generation times. - source: - openapi: tts-openapi.json - PostedContextWithUtterances: - properties: - utterances: - type: list - source: - openapi: tts-openapi.json - AudioEncoding: - docs: >- - Encoding information about the generated audio, including the `format` and - `sample_rate`. - properties: - format: - type: AudioFormatType - docs: Format for the output audio. - sample_rate: - type: integer - docs: >- - The sample rate (`Hz`) of the generated audio. The default sample rate - is `48000 Hz`. 
- source: - openapi: tts-openapi.json - ErrorResponse: - properties: - code: optional - error: optional - message: optional - source: - openapi: tts-openapi.json - ReturnGeneration: - properties: - audio: - type: string - docs: >- - The generated audio output in the requested format, encoded as a - base64 string. - duration: - type: double - docs: Duration of the generated audio in seconds. - encoding: - type: AudioEncoding - file_size: - type: integer - docs: Size of the generated audio in bytes. - generation_id: - type: string - docs: >- - A unique ID associated with this TTS generation that can be used as - context for generating consistent speech style and prosody across - multiple requests. - snippets: - docs: >- - A list of snippet groups where each group corresponds to an utterance - in the request. Each group contains segmented snippets that represent - the original utterance divided into more natural-sounding units - optimized for speech delivery. - type: list> - source: - openapi: tts-openapi.json - HTTPValidationError: - properties: - detail: - type: optional> - source: - openapi: tts-openapi.json - FormatMp3: - properties: - type: literal<"mp3"> - source: - openapi: tts-openapi.json - PostedTts: - properties: - context: - type: optional - docs: >- - Utterances to use as context for generating consistent speech style - and prosody across multiple requests. These will not be converted to - speech output. - format: - type: optional - docs: Specifies the output audio file format. - include_timestamp_types: - type: optional> - docs: >- - The set of timestamp types to include in the response. Only supported - for Octave 2 requests. - num_generations: - type: optional - docs: >- - Number of audio generations to produce from the input utterances. - - - Using `num_generations` enables faster processing than issuing - multiple sequential requests. 
Additionally, specifying - `num_generations` allows prosody continuation across all generations - without repeating context, ensuring each generation sounds slightly - different while maintaining contextual consistency. - default: 1 - validation: - min: 1 - max: 5 - split_utterances: - type: optional - docs: >- - Controls how audio output is segmented in the response. - - - - When **enabled** (`true`), input utterances are automatically split - into natural-sounding speech segments. - - - - When **disabled** (`false`), the response maintains a strict - one-to-one mapping between input utterances and output snippets. - - - This setting affects how the `snippets` array is structured in the - response, which may be important for applications that need to track - the relationship between input text and generated audio segments. When - setting to `false`, avoid including utterances with long `text`, as - this can result in distorted output. - default: true - strip_headers: - type: optional - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. Otherwise, - if disabled, each chunk's audio will be its own audio file, each with - its own headers (if applicable). - default: false - utterances: - docs: >- - A list of **Utterances** to be converted to speech output. - - - An **Utterance** is a unit of input for - [Octave](/docs/text-to-speech-tts/overview), and includes input - `text`, an optional `description` to serve as the prompt for how the - speech should be delivered, an optional `voice` specification, and - additional controls to guide delivery for `speed` and - `trailing_silence`. - type: list - version: - type: optional - docs: >- - Selects the Octave model version used to synthesize speech for this - request. If you omit this field, Hume automatically routes the request - to the most appropriate model. 
Setting a specific version ensures - stable and repeatable behavior across requests. - - - Use `2` to opt into the latest Octave capabilities. When you specify - version `2`, you must also provide a `voice`. Requests that set - `version: 2` without a voice will be rejected. - - - For a comparison of Octave versions, see the [Octave - versions](/docs/text-to-speech-tts/overview#octave-versions) section - in the TTS overview. - instant_mode: - type: optional - docs: >- - Enables ultra-low latency streaming, significantly reducing the time - until the first audio chunk is received. Recommended for real-time - applications requiring immediate audio playback. For further details, - see our documentation on [instant - mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - - - A - [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) - must be specified when instant mode is enabled. Dynamic voice - generation is not supported with this mode. - - - Instant mode is only supported for streaming endpoints (e.g., - [/v0/tts/stream/json](/reference/text-to-speech-tts/synthesize-json-streaming), - [/v0/tts/stream/file](/reference/text-to-speech-tts/synthesize-file-streaming)). - - - Ensure only a single generation is requested - ([num_generations](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.num_generations) - must be `1` or omitted). - default: true - source: - openapi: tts-openapi.json - ReturnTts: - properties: - generations: - type: list - request_id: - type: optional - docs: >- - A unique ID associated with this request for tracking and - troubleshooting. Use this ID when contacting [support](/support) for - troubleshooting assistance. - source: - openapi: tts-openapi.json - ReturnVoice: - docs: An Octave voice available for text-to-speech - properties: - compatible_octave_models: optional> - id: - type: optional - docs: ID of the voice in the `Voice Library`. 
- name: - type: optional - docs: Name of the voice in the `Voice Library`. - provider: - type: optional - docs: >- - The provider associated with the created voice. - - - Voices created through this endpoint will always have the provider set - to `CUSTOM_VOICE`, indicating a custom voice stored in your account. - source: - openapi: tts-openapi.json - FormatPcm: - properties: - type: literal<"pcm"> - source: - openapi: tts-openapi.json - PostedUtterance: - properties: - description: - type: optional - docs: >- - Natural language instructions describing how the synthesized speech - should sound, including but not limited to tone, intonation, pacing, - and accent. - - - **This field behaves differently depending on whether a voice is - specified**: - - - **Voice specified**: the description will serve as acting directions - for delivery. Keep directions concise—100 characters or fewer—for best - results. See our guide on [acting - instructions](/docs/text-to-speech-tts/acting-instructions). - - - **Voice not specified**: the description will serve as a voice - prompt for generating a voice. See our [prompting - guide](/docs/text-to-speech-tts/prompting) for design tips. - validation: - maxLength: 1000 - speed: - type: optional - docs: >- - Speed multiplier for the synthesized speech. Extreme values below 0.75 - and above 1.5 may sometimes cause instability to the generated output. - default: 1 - validation: - min: 0.5 - max: 2 - text: - type: string - docs: The input text to be synthesized into speech. - validation: - maxLength: 5000 - trailing_silence: - type: optional - docs: Duration of trailing silence (in seconds) to add to this utterance - default: 0 - validation: - min: 0 - max: 5 - voice: - type: optional - docs: >- - The `name` or `id` associated with a **Voice** from the **Voice - Library** to be used as the speaker for this and all subsequent - `utterances`, until the `voice` field is updated again. 
- - See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**. - source: - openapi: tts-openapi.json - ValidationErrorLocItem: - discriminated: false - union: - - string - - integer - source: - openapi: tts-openapi.json - inline: true - ValidationError: - properties: - loc: - type: list - msg: string - type: string - source: - openapi: tts-openapi.json - FormatWav: - properties: - type: literal<"wav"> - source: - openapi: tts-openapi.json - ReturnPagedVoices: - docs: A paginated list Octave voices available for text-to-speech - properties: - page_number: - type: optional - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: optional - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: optional - docs: The total number of pages in the collection. - voices_page: - type: optional> - docs: >- - List of voices returned for the specified `page_number` and - `page_size`. - source: - openapi: tts-openapi.json diff --git a/.mock/definition/tts/streamInput.yml b/.mock/definition/tts/streamInput.yml deleted file mode 100644 index 807536e2..00000000 --- a/.mock/definition/tts/streamInput.yml +++ /dev/null @@ -1,96 +0,0 @@ -imports: - root: __package__.yml -channel: - path: /stream/input - url: tts - auth: false - docs: Generate emotionally expressive speech. - query-parameters: - access_token: - type: optional - default: '' - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. 
- - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - context_generation_id: - type: optional - docs: >- - The ID of a prior TTS generation to use as context for generating - consistent speech style and prosody across multiple requests. Including - context may increase audio generation times. - format_type: - type: optional - docs: The format to be used for audio generation. - include_timestamp_types: - type: optional - allow-multiple: true - docs: The set of timestamp types to include in the response. - instant_mode: - type: optional - default: true - docs: >- - Enables ultra-low latency streaming, significantly reducing the time - until the first audio chunk is received. Recommended for real-time - applications requiring immediate audio playback. For further details, - see our documentation on [instant - mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode). - no_binary: - type: optional - default: false - docs: If enabled, no binary websocket messages will be sent to the client. - strip_headers: - type: optional - default: false - docs: >- - If enabled, the audio for all the chunks of a generation, once - concatenated together, will constitute a single audio file. Otherwise, - if disabled, each chunk's audio will be its own audio file, each with - its own headers (if applicable). - version: - type: optional - docs: >- - The version of the Octave Model to use. 1 for the legacy model, 2 for - the new model. - api_key: - type: optional - default: '' - docs: >- - API key used for authenticating the client. If not provided, an - `access_token` must be provided to authenticate. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). 
- messages: - publish: - origin: client - body: - type: root.PublishTts - subscribe: - origin: server - body: - type: root.TtsOutput - examples: - - messages: - - type: publish - body: {} - - type: subscribe - body: - audio: audio - audio_format: mp3 - chunk_index: 1 - generation_id: generation_id - is_last_chunk: true - request_id: request_id - snippet_id: snippet_id - text: text - type: audio diff --git a/.mock/definition/tts/voices.yml b/.mock/definition/tts/voices.yml deleted file mode 100644 index b01203e9..00000000 --- a/.mock/definition/tts/voices.yml +++ /dev/null @@ -1,143 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: '' - endpoints: - list: - path: /v0/tts/voices - method: GET - docs: >- - Lists voices you have saved in your account, or voices from the [Voice - Library](https://platform.hume.ai/tts/voice-library). - pagination: - offset: $request.page_number - results: $response.voices_page - source: - openapi: tts-openapi.json - display-name: List voices - request: - name: VoicesListRequest - query-parameters: - provider: - type: root.VoiceProvider - docs: >- - Specify the voice provider to filter voices returned by the - endpoint: - - - - **`HUME_AI`**: Lists preset, shared voices from Hume's [Voice - Library](https://platform.hume.ai/tts/voice-library). - - - **`CUSTOM_VOICE`**: Lists custom voices created and saved to - your account. - page_number: - type: optional - default: 0 - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. 
- - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: optional - filter_tag: - type: optional - allow-multiple: true - response: - docs: Success - type: root.ReturnPagedVoices - status-code: 200 - errors: - - root.BadRequestError - examples: - - query-parameters: - provider: CUSTOM_VOICE - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - voices_page: - - id: c42352c0-4566-455d-b180-0f654b65b525 - name: David Hume - provider: CUSTOM_VOICE - - id: d87352b0-26a3-4b11-081b-d157a5674d19 - name: Goliath Hume - provider: CUSTOM_VOICE - create: - path: /v0/tts/voices - method: POST - docs: >- - Saves a new custom voice to your account using the specified TTS - generation ID. - - - Once saved, this voice can be reused in subsequent TTS requests, - ensuring consistent speech style and prosody. For more details on voice - creation, see the [Voices Guide](/docs/text-to-speech-tts/voices). - source: - openapi: tts-openapi.json - display-name: Create voice - request: - name: PostedVoice - body: - properties: - generation_id: - type: string - docs: >- - A unique ID associated with this TTS generation that can be used - as context for generating consistent speech style and prosody - across multiple requests. - name: - type: string - docs: Name of the voice in the `Voice Library`. - content-type: application/json - response: - docs: Successful Response - type: root.ReturnVoice - status-code: 200 - errors: - - root.UnprocessableEntityError - examples: - - request: - generation_id: 795c949a-1510-4a80-9646-7d0863b023ab - name: David Hume - response: - body: - id: c42352c0-4566-455d-b180-0f654b65b525 - name: David Hume - provider: CUSTOM_VOICE - delete: - path: /v0/tts/voices - method: DELETE - docs: Deletes a previously generated custom voice. 
- source: - openapi: tts-openapi.json - display-name: Delete voice - request: - name: VoicesDeleteRequest - query-parameters: - name: - type: string - docs: Name of the voice to delete - errors: - - root.BadRequestError - examples: - - query-parameters: - name: David Hume - source: - openapi: tts-openapi.json diff --git a/.mock/fern.config.json b/.mock/fern.config.json deleted file mode 100644 index 2a7ce060..00000000 --- a/.mock/fern.config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "organization": "hume", - "version": "0.114.0" -} diff --git a/package.json b/package.json index d14cc300..8b95e20d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "hume", - "version": "0.15.7", + "version": "0.15.8", "private": false, "repository": "github:humeai/hume-typescript-sdk", "type": "commonjs", diff --git a/reference.md b/reference.md index 11e7a680..61f49ee5 100644 --- a/reference.md +++ b/reference.md @@ -416,7 +416,7 @@ const response = page.response;
-**request:** `Hume.tts.VoicesListRequest` +**request:** `Hume.tts.ListVoicesRequest`
@@ -549,7 +549,7 @@ await client.tts.voices.delete({
-**request:** `Hume.tts.VoicesDeleteRequest` +**request:** `Hume.tts.DeleteVoicesRequest`
@@ -570,7 +570,7 @@ await client.tts.voices.delete({ ## EmpathicVoice ControlPlane -
client.empathicVoice.controlPlane.send(chatId, { ...params }) -> void +
client.empathicVoice.controlPlane.send({ ...params }) -> void
@@ -597,8 +597,11 @@ Send a message to a specific chat.
```typescript -await client.empathicVoice.controlPlane.send("chat_id", { - type: "session_settings" +await client.empathicVoice.controlPlane.send({ + chatId: "chat_id", + body: { + type: "session_settings" + } }); ``` @@ -615,15 +618,7 @@ await client.empathicVoice.controlPlane.send("chat_id", {
-**chatId:** `string` - -
-
- -
-
- -**request:** `Hume.ControlPlanePublishEvent` +**request:** `Hume.empathicVoice.SendControlPlaneRequest`
@@ -709,7 +704,7 @@ const response = page.response;
-**request:** `Hume.empathicVoice.ChatGroupsListChatGroupsRequest` +**request:** `Hume.empathicVoice.ListChatGroupsChatGroupsRequest`
@@ -729,7 +724,7 @@ const response = page.response;
-
client.empathicVoice.chatGroups.getChatGroup(id, { ...params }) -> Hume.ReturnChatGroupPagedChats +
client.empathicVoice.chatGroups.getChatGroup({ ...params }) -> Hume.ReturnChatGroupPagedChats
@@ -756,7 +751,8 @@ Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associate
```typescript -await client.empathicVoice.chatGroups.getChatGroup("697056f0-6c7e-487d-9bd8-9c19df79f05f", { +await client.empathicVoice.chatGroups.getChatGroup({ + id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", pageNumber: 0, pageSize: 1, ascendingOrder: true @@ -776,15 +772,7 @@ await client.empathicVoice.chatGroups.getChatGroup("697056f0-6c7e-487d-9bd8-9c19
-**id:** `string` — Identifier for a Chat Group. Formatted as a UUID. - -
-
- -
-
- -**request:** `Hume.empathicVoice.ChatGroupsGetChatGroupRequest` +**request:** `Hume.empathicVoice.GetChatGroupChatGroupsRequest`
@@ -804,7 +792,7 @@ await client.empathicVoice.chatGroups.getChatGroup("697056f0-6c7e-487d-9bd8-9c19
-
client.empathicVoice.chatGroups.getAudio(id, { ...params }) -> Hume.ReturnChatGroupPagedAudioReconstructions +
client.empathicVoice.chatGroups.getAudio({ ...params }) -> Hume.ReturnChatGroupPagedAudioReconstructions
@@ -831,7 +819,8 @@ Fetches a paginated list of audio for each **Chat** within the specified **Chat
```typescript -await client.empathicVoice.chatGroups.getAudio("369846cf-6ad5-404d-905e-a8acb5cdfc78", { +await client.empathicVoice.chatGroups.getAudio({ + id: "369846cf-6ad5-404d-905e-a8acb5cdfc78", pageNumber: 0, pageSize: 10, ascendingOrder: true @@ -851,15 +840,7 @@ await client.empathicVoice.chatGroups.getAudio("369846cf-6ad5-404d-905e-a8acb5cd
-**id:** `string` — Identifier for a Chat Group. Formatted as a UUID. - -
-
- -
-
- -**request:** `Hume.empathicVoice.ChatGroupsGetAudioRequest` +**request:** `Hume.empathicVoice.GetAudioChatGroupsRequest`
@@ -879,7 +860,7 @@ await client.empathicVoice.chatGroups.getAudio("369846cf-6ad5-404d-905e-a8acb5cd
-
client.empathicVoice.chatGroups.listChatGroupEvents(id, { ...params }) -> core.Page +
client.empathicVoice.chatGroups.listChatGroupEvents({ ...params }) -> core.Page
@@ -906,7 +887,8 @@ Fetches a paginated list of **Chat** events associated with a **Chat Group**.
```typescript -const pageableResponse = await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", { +const pageableResponse = await client.empathicVoice.chatGroups.listChatGroupEvents({ + id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", pageNumber: 0, pageSize: 3, ascendingOrder: true @@ -916,7 +898,8 @@ for await (const item of pageableResponse) { } // Or you can manually iterate page-by-page -let page = await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", { +let page = await client.empathicVoice.chatGroups.listChatGroupEvents({ + id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", pageNumber: 0, pageSize: 3, ascendingOrder: true @@ -942,15 +925,7 @@ const response = page.response;
-**id:** `string` — Identifier for a Chat Group. Formatted as a UUID. - -
-
- -
-
- -**request:** `Hume.empathicVoice.ChatGroupsListChatGroupEventsRequest` +**request:** `Hume.empathicVoice.ListChatGroupEventsChatGroupsRequest`
@@ -1034,7 +1009,7 @@ const response = page.response;
-**request:** `Hume.empathicVoice.ChatsListChatsRequest` +**request:** `Hume.empathicVoice.ListChatsChatsRequest`
@@ -1054,7 +1029,7 @@ const response = page.response;
-
client.empathicVoice.chats.listChatEvents(id, { ...params }) -> core.Page +
client.empathicVoice.chats.listChatEvents({ ...params }) -> core.Page
@@ -1081,7 +1056,8 @@ Fetches a paginated list of **Chat** events.
```typescript -const pageableResponse = await client.empathicVoice.chats.listChatEvents("470a49f6-1dec-4afe-8b61-035d3b2d63b0", { +const pageableResponse = await client.empathicVoice.chats.listChatEvents({ + id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0", pageNumber: 0, pageSize: 3, ascendingOrder: true @@ -1091,7 +1067,8 @@ for await (const item of pageableResponse) { } // Or you can manually iterate page-by-page -let page = await client.empathicVoice.chats.listChatEvents("470a49f6-1dec-4afe-8b61-035d3b2d63b0", { +let page = await client.empathicVoice.chats.listChatEvents({ + id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0", pageNumber: 0, pageSize: 3, ascendingOrder: true @@ -1117,15 +1094,7 @@ const response = page.response;
-**id:** `string` — Identifier for a Chat. Formatted as a UUID. - -
-
- -
-
- -**request:** `Hume.empathicVoice.ChatsListChatEventsRequest` +**request:** `Hume.empathicVoice.ListChatEventsChatsRequest`
@@ -1145,7 +1114,7 @@ const response = page.response;
-
client.empathicVoice.chats.getAudio(id) -> Hume.ReturnChatAudioReconstruction +
client.empathicVoice.chats.getAudio({ ...params }) -> Hume.ReturnChatAudioReconstruction
@@ -1172,7 +1141,9 @@ Fetches the audio of a previous **Chat**. For more details, see our guide on aud
```typescript -await client.empathicVoice.chats.getAudio("470a49f6-1dec-4afe-8b61-035d3b2d63b0"); +await client.empathicVoice.chats.getAudio({ + id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0" +}); ```
@@ -1188,7 +1159,7 @@ await client.empathicVoice.chats.getAudio("470a49f6-1dec-4afe-8b61-035d3b2d63b0"
-**id:** `string` — Identifier for a chat. Formatted as a UUID. +**request:** `Hume.empathicVoice.GetAudioChatsRequest`
@@ -1272,7 +1243,7 @@ const response = page.response;
-**request:** `Hume.empathicVoice.ConfigsListConfigsRequest` +**request:** `Hume.empathicVoice.ListConfigsConfigsRequest`
@@ -1387,7 +1358,7 @@ await client.empathicVoice.configs.createConfig({
-
client.empathicVoice.configs.listConfigVersions(id, { ...params }) -> core.Page +
client.empathicVoice.configs.listConfigVersions({ ...params }) -> core.Page
@@ -1416,13 +1387,17 @@ For more details on configuration options and how to configure EVI, see our [con
```typescript -const pageableResponse = await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3"); +const pageableResponse = await client.empathicVoice.configs.listConfigVersions({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" +}); for await (const item of pageableResponse) { console.log(item); } // Or you can manually iterate page-by-page -let page = await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3"); +let page = await client.empathicVoice.configs.listConfigVersions({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" +}); while (page.hasNextPage()) { page = page.getNextPage(); } @@ -1444,15 +1419,7 @@ const response = page.response;
-**id:** `string` — Identifier for a Config. Formatted as a UUID. - -
-
- -
-
- -**request:** `Hume.empathicVoice.ConfigsListConfigVersionsRequest` +**request:** `Hume.empathicVoice.ListConfigVersionsConfigsRequest`
@@ -1472,7 +1439,7 @@ const response = page.response;
-
client.empathicVoice.configs.createConfigVersion(id, { ...params }) -> Hume.ReturnConfig +
client.empathicVoice.configs.createConfigVersion({ ...params }) -> Hume.ReturnConfig
@@ -1501,7 +1468,8 @@ For more details on configuration options and how to configure EVI, see our [con
```typescript -await client.empathicVoice.configs.createConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", { +await client.empathicVoice.configs.createConfigVersion({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", versionDescription: "This is an updated version of the Weather Assistant Config.", eviVersion: "3", prompt: { @@ -1550,14 +1518,6 @@ await client.empathicVoice.configs.createConfigVersion("1b60e1a0-cc59-424a-8d2c-
-**id:** `string` — Identifier for a Config. Formatted as a UUID. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedConfigVersion`
@@ -1578,7 +1538,7 @@ await client.empathicVoice.configs.createConfigVersion("1b60e1a0-cc59-424a-8d2c-
-
client.empathicVoice.configs.deleteConfig(id) -> void +
client.empathicVoice.configs.deleteConfig({ ...params }) -> void
@@ -1607,7 +1567,9 @@ For more details on configuration options and how to configure EVI, see our [con
```typescript -await client.empathicVoice.configs.deleteConfig("1b60e1a0-cc59-424a-8d2c-189d354db3f3"); +await client.empathicVoice.configs.deleteConfig({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" +}); ```
@@ -1623,7 +1585,7 @@ await client.empathicVoice.configs.deleteConfig("1b60e1a0-cc59-424a-8d2c-189d354
-**id:** `string` — Identifier for a Config. Formatted as a UUID. +**request:** `Hume.empathicVoice.DeleteConfigConfigsRequest`
@@ -1643,7 +1605,7 @@ await client.empathicVoice.configs.deleteConfig("1b60e1a0-cc59-424a-8d2c-189d354
-
client.empathicVoice.configs.updateConfigName(id, { ...params }) -> string +
client.empathicVoice.configs.updateConfigName({ ...params }) -> string
@@ -1672,7 +1634,8 @@ For more details on configuration options and how to configure EVI, see our [con
```typescript -await client.empathicVoice.configs.updateConfigName("1b60e1a0-cc59-424a-8d2c-189d354db3f3", { +await client.empathicVoice.configs.updateConfigName({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", name: "Updated Weather Assistant Config Name" }); @@ -1690,14 +1653,6 @@ await client.empathicVoice.configs.updateConfigName("1b60e1a0-cc59-424a-8d2c-189
-**id:** `string` — Identifier for a Config. Formatted as a UUID. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedConfigName`
@@ -1718,7 +1673,7 @@ await client.empathicVoice.configs.updateConfigName("1b60e1a0-cc59-424a-8d2c-189
-
client.empathicVoice.configs.getConfigVersion(id, version) -> Hume.ReturnConfig +
client.empathicVoice.configs.getConfigVersion({ ...params }) -> Hume.ReturnConfig
@@ -1747,7 +1702,10 @@ For more details on configuration options and how to configure EVI, see our [con
```typescript -await client.empathicVoice.configs.getConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1); +await client.empathicVoice.configs.getConfigVersion({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version: 1 +}); ```
@@ -1763,21 +1721,7 @@ await client.empathicVoice.configs.getConfigVersion("1b60e1a0-cc59-424a-8d2c-189
-**id:** `string` — Identifier for a Config. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Config. - -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +**request:** `Hume.empathicVoice.GetConfigVersionConfigsRequest`
@@ -1797,7 +1741,7 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathicVoice.configs.deleteConfigVersion(id, version) -> void +
client.empathicVoice.configs.deleteConfigVersion({ ...params }) -> void
@@ -1826,7 +1770,10 @@ For more details on configuration options and how to configure EVI, see our [con
```typescript -await client.empathicVoice.configs.deleteConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1); +await client.empathicVoice.configs.deleteConfigVersion({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version: 1 +}); ```
@@ -1842,21 +1789,7 @@ await client.empathicVoice.configs.deleteConfigVersion("1b60e1a0-cc59-424a-8d2c-
-**id:** `string` — Identifier for a Config. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Config. - -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +**request:** `Hume.empathicVoice.DeleteConfigVersionConfigsRequest`
@@ -1876,7 +1809,7 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathicVoice.configs.updateConfigDescription(id, version, { ...params }) -> Hume.ReturnConfig +
client.empathicVoice.configs.updateConfigDescription({ ...params }) -> Hume.ReturnConfig
@@ -1905,7 +1838,9 @@ For more details on configuration options and how to configure EVI, see our [con
```typescript -await client.empathicVoice.configs.updateConfigDescription("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1, { +await client.empathicVoice.configs.updateConfigDescription({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version: 1, versionDescription: "This is an updated version_description." }); @@ -1923,28 +1858,6 @@ await client.empathicVoice.configs.updateConfigDescription("1b60e1a0-cc59-424a-8
-**id:** `string` — Identifier for a Config. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Config. - -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedConfigVersionDescription`
@@ -1966,7 +1879,7 @@ Version numbers are integer values representing different iterations of the Conf
## EmpathicVoice Prompts -
client.empathicVoice.prompts.listPrompts({ ...params }) -> core.Page<(Hume.ReturnPrompt | undefined), Hume.ReturnPagedPrompts> +
client.empathicVoice.prompts.listPrompts({ ...params }) -> core.Page<(Hume.ReturnPrompt | null), Hume.ReturnPagedPrompts>
@@ -2029,7 +1942,7 @@ const response = page.response;
-**request:** `Hume.empathicVoice.PromptsListPromptsRequest` +**request:** `Hume.empathicVoice.ListPromptsPromptsRequest`
@@ -2049,7 +1962,7 @@ const response = page.response;
-
client.empathicVoice.prompts.createPrompt({ ...params }) -> Hume.ReturnPrompt | undefined +
client.empathicVoice.prompts.createPrompt({ ...params }) -> Hume.ReturnPrompt | null
@@ -2117,7 +2030,7 @@ await client.empathicVoice.prompts.createPrompt({
-
client.empathicVoice.prompts.listPromptVersions(id, { ...params }) -> Hume.ReturnPagedPrompts +
client.empathicVoice.prompts.listPromptVersions({ ...params }) -> Hume.ReturnPagedPrompts
@@ -2146,7 +2059,9 @@ See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for t
```typescript -await client.empathicVoice.prompts.listPromptVersions("af699d45-2985-42cc-91b9-af9e5da3bac5"); +await client.empathicVoice.prompts.listPromptVersions({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5" +}); ```
@@ -2162,15 +2077,7 @@ await client.empathicVoice.prompts.listPromptVersions("af699d45-2985-42cc-91b9-a
-**id:** `string` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**request:** `Hume.empathicVoice.PromptsListPromptVersionsRequest` +**request:** `Hume.empathicVoice.ListPromptVersionsPromptsRequest`
@@ -2190,7 +2097,7 @@ await client.empathicVoice.prompts.listPromptVersions("af699d45-2985-42cc-91b9-a
-
client.empathicVoice.prompts.createPromptVersion(id, { ...params }) -> Hume.ReturnPrompt | undefined +
client.empathicVoice.prompts.createPromptVersion({ ...params }) -> Hume.ReturnPrompt | null
@@ -2219,7 +2126,8 @@ See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for t
```typescript -await client.empathicVoice.prompts.createPromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", { +await client.empathicVoice.prompts.createPromptVersion({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", text: "You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", versionDescription: "This is an updated version of the Weather Assistant Prompt." }); @@ -2238,14 +2146,6 @@ await client.empathicVoice.prompts.createPromptVersion("af699d45-2985-42cc-91b9-
-**id:** `string` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedPromptVersion`
@@ -2266,7 +2166,7 @@ await client.empathicVoice.prompts.createPromptVersion("af699d45-2985-42cc-91b9-
-
client.empathicVoice.prompts.deletePrompt(id) -> void +
client.empathicVoice.prompts.deletePrompt({ ...params }) -> void
@@ -2295,7 +2195,9 @@ See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for t
```typescript -await client.empathicVoice.prompts.deletePrompt("af699d45-2985-42cc-91b9-af9e5da3bac5"); +await client.empathicVoice.prompts.deletePrompt({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5" +}); ```
@@ -2311,7 +2213,7 @@ await client.empathicVoice.prompts.deletePrompt("af699d45-2985-42cc-91b9-af9e5da
-**id:** `string` — Identifier for a Prompt. Formatted as a UUID. +**request:** `Hume.empathicVoice.DeletePromptPromptsRequest`
@@ -2331,7 +2233,7 @@ await client.empathicVoice.prompts.deletePrompt("af699d45-2985-42cc-91b9-af9e5da
-
client.empathicVoice.prompts.updatePromptName(id, { ...params }) -> string +
client.empathicVoice.prompts.updatePromptName({ ...params }) -> string
@@ -2360,7 +2262,8 @@ See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for t
```typescript -await client.empathicVoice.prompts.updatePromptName("af699d45-2985-42cc-91b9-af9e5da3bac5", { +await client.empathicVoice.prompts.updatePromptName({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", name: "Updated Weather Assistant Prompt Name" }); @@ -2378,14 +2281,6 @@ await client.empathicVoice.prompts.updatePromptName("af699d45-2985-42cc-91b9-af9
-**id:** `string` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedPromptName`
@@ -2406,7 +2301,7 @@ await client.empathicVoice.prompts.updatePromptName("af699d45-2985-42cc-91b9-af9
-
client.empathicVoice.prompts.getPromptVersion(id, version) -> Hume.ReturnPrompt | undefined +
client.empathicVoice.prompts.getPromptVersion({ ...params }) -> Hume.ReturnPrompt | null
@@ -2435,7 +2330,10 @@ See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for t
```typescript -await client.empathicVoice.prompts.getPromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", 0); +await client.empathicVoice.prompts.getPromptVersion({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + version: 0 +}); ```
@@ -2451,21 +2349,7 @@ await client.empathicVoice.prompts.getPromptVersion("af699d45-2985-42cc-91b9-af9
-**id:** `string` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**request:** `Hume.empathicVoice.GetPromptVersionPromptsRequest`
@@ -2485,7 +2369,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathicVoice.prompts.deletePromptVersion(id, version) -> void +
client.empathicVoice.prompts.deletePromptVersion({ ...params }) -> void
@@ -2514,7 +2398,10 @@ See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for t
```typescript -await client.empathicVoice.prompts.deletePromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", 1); +await client.empathicVoice.prompts.deletePromptVersion({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + version: 1 +}); ```
@@ -2530,21 +2417,7 @@ await client.empathicVoice.prompts.deletePromptVersion("af699d45-2985-42cc-91b9-
-**id:** `string` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**request:** `Hume.empathicVoice.DeletePromptVersionPromptsRequest`
@@ -2564,7 +2437,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathicVoice.prompts.updatePromptDescription(id, version, { ...params }) -> Hume.ReturnPrompt | undefined +
client.empathicVoice.prompts.updatePromptDescription({ ...params }) -> Hume.ReturnPrompt | null
@@ -2593,7 +2466,9 @@ See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for t
```typescript -await client.empathicVoice.prompts.updatePromptDescription("af699d45-2985-42cc-91b9-af9e5da3bac5", 1, { +await client.empathicVoice.prompts.updatePromptDescription({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + version: 1, versionDescription: "This is an updated version_description." }); @@ -2611,28 +2486,6 @@ await client.empathicVoice.prompts.updatePromptDescription("af699d45-2985-42cc-9
-**id:** `string` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedPromptVersionDescription`
@@ -2654,7 +2507,7 @@ Version numbers are integer values representing different iterations of the Prom
## EmpathicVoice Tools -
client.empathicVoice.tools.listTools({ ...params }) -> core.Page<(Hume.ReturnUserDefinedTool | undefined), Hume.ReturnPagedUserDefinedTools> +
client.empathicVoice.tools.listTools({ ...params }) -> core.Page<(Hume.ReturnUserDefinedTool | null), Hume.ReturnPagedUserDefinedTools>
@@ -2717,7 +2570,7 @@ const response = page.response;
-**request:** `Hume.empathicVoice.ToolsListToolsRequest` +**request:** `Hume.empathicVoice.ListToolsToolsRequest`
@@ -2737,7 +2590,7 @@ const response = page.response;
-
client.empathicVoice.tools.createTool({ ...params }) -> Hume.ReturnUserDefinedTool | undefined +
client.empathicVoice.tools.createTool({ ...params }) -> Hume.ReturnUserDefinedTool | null
@@ -2808,7 +2661,7 @@ await client.empathicVoice.tools.createTool({
-
client.empathicVoice.tools.listToolVersions(id, { ...params }) -> core.Page<(Hume.ReturnUserDefinedTool | undefined), Hume.ReturnPagedUserDefinedTools> +
client.empathicVoice.tools.listToolVersions({ ...params }) -> core.Page<(Hume.ReturnUserDefinedTool | null), Hume.ReturnPagedUserDefinedTools>
@@ -2837,13 +2690,17 @@ Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-ca
```typescript -const pageableResponse = await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea"); +const pageableResponse = await client.empathicVoice.tools.listToolVersions({ + id: "00183a3f-79ba-413d-9f3b-609864268bea" +}); for await (const item of pageableResponse) { console.log(item); } // Or you can manually iterate page-by-page -let page = await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea"); +let page = await client.empathicVoice.tools.listToolVersions({ + id: "00183a3f-79ba-413d-9f3b-609864268bea" +}); while (page.hasNextPage()) { page = page.getNextPage(); } @@ -2865,15 +2722,7 @@ const response = page.response;
-**id:** `string` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**request:** `Hume.empathicVoice.ToolsListToolVersionsRequest` +**request:** `Hume.empathicVoice.ListToolVersionsToolsRequest`
@@ -2893,7 +2742,7 @@ const response = page.response;
-
client.empathicVoice.tools.createToolVersion(id, { ...params }) -> Hume.ReturnUserDefinedTool | undefined +
client.empathicVoice.tools.createToolVersion({ ...params }) -> Hume.ReturnUserDefinedTool | null
@@ -2922,7 +2771,8 @@ Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-ca
```typescript -await client.empathicVoice.tools.createToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", { +await client.empathicVoice.tools.createToolVersion({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", parameters: "{ \"type\": \"object\", \"properties\": { \"location\": { \"type\": \"string\", \"description\": \"The city and state, e.g. San Francisco, CA\" }, \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\", \"kelvin\"], \"description\": \"The temperature unit to use. Infer this from the users location.\" } }, \"required\": [\"location\", \"format\"] }", versionDescription: "Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", fallbackContent: "Unable to fetch current weather.", @@ -2943,14 +2793,6 @@ await client.empathicVoice.tools.createToolVersion("00183a3f-79ba-413d-9f3b-6098
-**id:** `string` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedUserDefinedToolVersion`
@@ -2971,7 +2813,7 @@ await client.empathicVoice.tools.createToolVersion("00183a3f-79ba-413d-9f3b-6098
-
client.empathicVoice.tools.deleteTool(id) -> void +
client.empathicVoice.tools.deleteTool({ ...params }) -> void
@@ -3000,7 +2842,9 @@ Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-ca
```typescript -await client.empathicVoice.tools.deleteTool("00183a3f-79ba-413d-9f3b-609864268bea"); +await client.empathicVoice.tools.deleteTool({ + id: "00183a3f-79ba-413d-9f3b-609864268bea" +}); ```
@@ -3016,7 +2860,7 @@ await client.empathicVoice.tools.deleteTool("00183a3f-79ba-413d-9f3b-609864268be
-**id:** `string` — Identifier for a Tool. Formatted as a UUID. +**request:** `Hume.empathicVoice.DeleteToolToolsRequest`
@@ -3036,7 +2880,7 @@ await client.empathicVoice.tools.deleteTool("00183a3f-79ba-413d-9f3b-609864268be
-
client.empathicVoice.tools.updateToolName(id, { ...params }) -> string +
client.empathicVoice.tools.updateToolName({ ...params }) -> string
@@ -3065,7 +2909,8 @@ Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-ca
```typescript -await client.empathicVoice.tools.updateToolName("00183a3f-79ba-413d-9f3b-609864268bea", { +await client.empathicVoice.tools.updateToolName({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", name: "get_current_temperature" }); @@ -3083,14 +2928,6 @@ await client.empathicVoice.tools.updateToolName("00183a3f-79ba-413d-9f3b-6098642
-**id:** `string` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedUserDefinedToolName`
@@ -3111,7 +2948,7 @@ await client.empathicVoice.tools.updateToolName("00183a3f-79ba-413d-9f3b-6098642
-
client.empathicVoice.tools.getToolVersion(id, version) -> Hume.ReturnUserDefinedTool | undefined +
client.empathicVoice.tools.getToolVersion({ ...params }) -> Hume.ReturnUserDefinedTool | null
@@ -3140,7 +2977,10 @@ Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-ca
```typescript -await client.empathicVoice.tools.getToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1); +await client.empathicVoice.tools.getToolVersion({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + version: 1 +}); ```
@@ -3156,21 +2996,7 @@ await client.empathicVoice.tools.getToolVersion("00183a3f-79ba-413d-9f3b-6098642
-**id:** `string` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +**request:** `Hume.empathicVoice.GetToolVersionToolsRequest`
@@ -3190,7 +3016,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathicVoice.tools.deleteToolVersion(id, version) -> void +
client.empathicVoice.tools.deleteToolVersion({ ...params }) -> void
@@ -3219,7 +3045,10 @@ Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-ca
```typescript -await client.empathicVoice.tools.deleteToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1); +await client.empathicVoice.tools.deleteToolVersion({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + version: 1 +}); ```
@@ -3235,21 +3064,7 @@ await client.empathicVoice.tools.deleteToolVersion("00183a3f-79ba-413d-9f3b-6098
-**id:** `string` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +**request:** `Hume.empathicVoice.DeleteToolVersionToolsRequest`
@@ -3269,7 +3084,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathicVoice.tools.updateToolDescription(id, version, { ...params }) -> Hume.ReturnUserDefinedTool | undefined +
client.empathicVoice.tools.updateToolDescription({ ...params }) -> Hume.ReturnUserDefinedTool | null
@@ -3298,7 +3113,9 @@ Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-ca
```typescript -await client.empathicVoice.tools.updateToolDescription("00183a3f-79ba-413d-9f3b-609864268bea", 1, { +await client.empathicVoice.tools.updateToolDescription({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + version: 1, versionDescription: "Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region." }); @@ -3316,28 +3133,6 @@ await client.empathicVoice.tools.updateToolDescription("00183a3f-79ba-413d-9f3b-
-**id:** `string` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `number` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. - -
-
- -
-
- **request:** `Hume.empathicVoice.PostedUserDefinedToolVersionDescription`
@@ -3402,7 +3197,7 @@ await client.expressionMeasurement.batch.listJobs();
-**request:** `Hume.expressionMeasurement.batch.BatchListJobsRequest` +**request:** `Hume.expressionMeasurement.batch.ListJobsBatchRequest`
@@ -3488,7 +3283,7 @@ await client.expressionMeasurement.batch.startInferenceJob({
-
client.expressionMeasurement.batch.getJobDetails(id) -> Hume.UnionJob +
client.expressionMeasurement.batch.getJobDetails({ ...params }) -> Hume.UnionJob
@@ -3515,7 +3310,9 @@ Get the request details and state of a given job.
```typescript -await client.expressionMeasurement.batch.getJobDetails("job_id"); +await client.expressionMeasurement.batch.getJobDetails({ + id: "job_id" +}); ```
@@ -3531,7 +3328,7 @@ await client.expressionMeasurement.batch.getJobDetails("job_id");
-**id:** `string` — The unique identifier for the job. +**request:** `Hume.expressionMeasurement.batch.GetJobDetailsBatchRequest`
@@ -3551,7 +3348,7 @@ await client.expressionMeasurement.batch.getJobDetails("job_id");
-
client.expressionMeasurement.batch.getJobPredictions(id) -> Hume.UnionPredictResult[] +
client.expressionMeasurement.batch.getJobPredictions({ ...params }) -> Hume.UnionPredictResult[]
@@ -3578,7 +3375,9 @@ Get the JSON predictions of a completed inference job.
```typescript -await client.expressionMeasurement.batch.getJobPredictions("job_id"); +await client.expressionMeasurement.batch.getJobPredictions({ + id: "job_id" +}); ```
@@ -3594,7 +3393,7 @@ await client.expressionMeasurement.batch.getJobPredictions("job_id");
-**id:** `string` — The unique identifier for the job. +**request:** `Hume.expressionMeasurement.batch.GetJobPredictionsBatchRequest`
@@ -3659,7 +3458,7 @@ await client.expressionMeasurement.batch.startInferenceJobFromLocalFile({
-**request:** `Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest` +**request:** `Hume.expressionMeasurement.batch.StartInferenceJobFromLocalFileBatchRequest`
diff --git a/src/Client.ts b/src/Client.ts index bc335ad0..e10b2d57 100644 --- a/src/Client.ts +++ b/src/Client.ts @@ -27,8 +27,8 @@ export class HumeClient { { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.15.7", - "User-Agent": "hume/0.15.7", + "X-Fern-SDK-Version": "0.15.8", + "User-Agent": "hume/0.15.8", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, }, diff --git a/src/api/resources/empathicVoice/resources/chat/client/Client.ts.diff b/src/api/resources/empathicVoice/resources/chat/client/Client.ts.diff new file mode 100644 index 00000000..6b5cd5a0 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/chat/client/Client.ts.diff @@ -0,0 +1,182 @@ +diff --git a/src/api/resources/empathicVoice/resources/chat/client/Client.ts b/src/api/resources/empathicVoice/resources/chat/client/Client.ts +index 210954d9..37d17b01 100644 +--- a/src/api/resources/empathicVoice/resources/chat/client/Client.ts ++++ b/src/api/resources/empathicVoice/resources/chat/client/Client.ts +@@ -1,36 +1,26 @@ +-/** THIS FILE IS MANUALLY MAINTAINED: see .fernignore */ ++// This file was auto-generated by Fern from our API Definition. 
+ +-import * as environments from "../../../../../../environments.js"; ++import type { BaseClientOptions } from "../../../../../../BaseClient.js"; ++import { mergeHeaders, mergeOnlyDefinedHeaders } from "../../../../../../core/headers.js"; + import * as core from "../../../../../../core/index.js"; +-import * as Hume from "../../../../../index.js"; +-import { mergeOnlyDefinedHeaders, mergeHeaders } from "../../../../../../core/headers.js"; ++import * as environments from "../../../../../../environments.js"; + import * as serializers from "../../../../../../serialization/index.js"; ++import type * as Hume from "../../../../../index.js"; + import { ChatSocket } from "./Socket.js"; + + export declare namespace Chat { +- export interface Options { +- environment?: core.Supplier; +- /** Specify a custom URL to connect the client to. */ +- baseUrl?: core.Supplier; +- apiKey?: core.Supplier; +- /** Additional headers to include in requests. */ +- headers?: Record | null | undefined>; +- } ++ export interface Options extends BaseClientOptions {} + + export interface ConnectArgs { + accessToken?: string | undefined; ++ allowConnection?: boolean | undefined; + configId?: string | undefined; +- configVersion?: string | number | undefined; ++ configVersion?: number | undefined; + eventLimit?: number | undefined; + resumedChatGroupId?: string | undefined; + verboseTranscription?: boolean | undefined; +- allowConnection?: boolean | undefined; +- /** @deprecated Use sessionSettings.voiceId instead */ +- voiceId?: string | undefined; + apiKey?: string | undefined; +- sessionSettings?: Hume.empathicVoice.ConnectSessionSettings; +- /** Extra query parameters sent at WebSocket connection */ +- queryParams?: Record; ++ sessionSettings: Hume.empathicVoice.ConnectSessionSettings; + /** Arbitrary headers to send with the websocket connect request. */ + headers?: Record; + /** Enable debug mode on the websocket. Defaults to false. 
*/ +@@ -47,91 +37,69 @@ export class Chat { + this._options = _options; + } + +- public connect(args: Chat.ConnectArgs = {}): ChatSocket { ++ public async connect(args: Chat.ConnectArgs): Promise { + const { + accessToken, ++ allowConnection, + configId, + configVersion, + eventLimit, + resumedChatGroupId, + verboseTranscription, +- voiceId, + apiKey, + sessionSettings, +- queryParams, + headers, + debug, + reconnectAttempts, +- allowConnection, + } = args; + const _queryParams: Record = {}; +- + if (accessToken != null) { +- _queryParams["access_token"] = accessToken; ++ _queryParams.access_token = accessToken; ++ } ++ ++ if (allowConnection != null) { ++ _queryParams.allow_connection = allowConnection.toString(); + } + + if (configId != null) { +- _queryParams["config_id"] = configId; ++ _queryParams.config_id = configId; + } + + if (configVersion != null) { +- _queryParams["config_version"] = +- typeof configVersion === "number" ? configVersion.toString() : configVersion; ++ _queryParams.config_version = configVersion.toString(); + } + + if (eventLimit != null) { +- _queryParams["event_limit"] = eventLimit.toString(); ++ _queryParams.event_limit = eventLimit.toString(); + } + + if (resumedChatGroupId != null) { +- _queryParams["resumed_chat_group_id"] = resumedChatGroupId; ++ _queryParams.resumed_chat_group_id = resumedChatGroupId; + } + + if (verboseTranscription != null) { +- _queryParams["verbose_transcription"] = verboseTranscription.toString(); +- } +- +- if (voiceId != null) { +- _queryParams["voice_id"] = voiceId; ++ _queryParams.verbose_transcription = verboseTranscription.toString(); + } + + if (apiKey != null) { +- _queryParams["api_key"] = apiKey; +- } +- +- if (allowConnection != null) { +- _queryParams["allow_connection"] = allowConnection === true ? 
"true" : "false"; ++ _queryParams.api_key = apiKey; + } + +- if (sessionSettings != null) { +- _queryParams["session_settings"] = serializers.empathicVoice.ConnectSessionSettings.jsonOrThrow( +- sessionSettings, +- { +- unrecognizedObjectKeys: "passthrough", +- allowUnrecognizedUnionMembers: true, +- allowUnrecognizedEnumValues: true, +- omitUndefined: true, +- breadcrumbsPrefix: ["request", "sessionSettings"], +- }, +- ); +- } +- +- // Merge in any additional query parameters +- if (queryParams != null) { +- for (const [name, value] of Object.entries(queryParams)) { +- _queryParams[name] = value; +- } +- } +- +- let _headers: Record = mergeHeaders( +- mergeOnlyDefinedHeaders({ ...this._getCustomAuthorizationHeaders() }), ++ _queryParams.session_settings = serializers.empathicVoice.ConnectSessionSettings.jsonOrThrow(sessionSettings, { ++ unrecognizedObjectKeys: "passthrough", ++ allowUnrecognizedUnionMembers: true, ++ allowUnrecognizedEnumValues: true, ++ omitUndefined: true, ++ breadcrumbsPrefix: ["request", "sessionSettings"], ++ }); ++ const _headers: Record = mergeHeaders( ++ mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), + headers, + ); +- + const socket = new core.ReconnectingWebSocket({ + url: core.url.join( +- core.Supplier.get(this._options["baseUrl"]) ?? +- (core.Supplier.get(this._options["environment"]) ?? environments.HumeEnvironment.Prod).evi, ++ (await core.Supplier.get(this._options.baseUrl)) ?? ++ ((await core.Supplier.get(this._options.environment)) ?? 
environments.HumeEnvironment.Prod).evi, + "/chat", + ), + protocols: [], +@@ -142,12 +110,8 @@ export class Chat { + return new ChatSocket({ socket }); + } + +- protected _getCustomAuthorizationHeaders(): Record { +- const apiKeyValue = core.Supplier.get(this._options.apiKey); +- // This `authHeaderValue` is manually added as if you don't provide it it will +- // be omitted from the headers which means it won't reach the logic in ws.ts that +- // extracts values from the headers and adds them to query parameters. +- const authHeaderValue = core.Supplier.get(this._options.headers?.authorization); +- return { "X-Hume-Api-Key": apiKeyValue, Authorization: authHeaderValue }; ++ protected async _getCustomAuthorizationHeaders(): Promise> { ++ const apiKeyValue = await core.Supplier.get(this._options.apiKey); ++ return { "X-Hume-Api-Key": apiKeyValue }; + } + } diff --git a/src/api/resources/empathicVoice/resources/chat/client/Socket.ts.diff b/src/api/resources/empathicVoice/resources/chat/client/Socket.ts.diff new file mode 100644 index 00000000..819adf6a --- /dev/null +++ b/src/api/resources/empathicVoice/resources/chat/client/Socket.ts.diff @@ -0,0 +1,139 @@ +diff --git a/src/api/resources/empathicVoice/resources/chat/client/Socket.ts b/src/api/resources/empathicVoice/resources/chat/client/Socket.ts +index a55c7b58..0f2f2a55 100644 +--- a/src/api/resources/empathicVoice/resources/chat/client/Socket.ts ++++ b/src/api/resources/empathicVoice/resources/chat/client/Socket.ts +@@ -1,17 +1,17 @@ +-/** THIS FILE IS MANUALLY MAINTAINED: see .fernignore */ ++// This file was auto-generated by Fern from our API Definition. 
+ + import * as core from "../../../../../../core/index.js"; +-import * as Hume from "../../../../../index.js"; +-import { PublishEvent } from "../../../../../../serialization/resources/empathicVoice/resources/chat/types/PublishEvent.js"; + import { fromJson } from "../../../../../../core/json.js"; + import * as serializers from "../../../../../../serialization/index.js"; ++import { PublishEvent } from "../../../../../../serialization/resources/empathicVoice/resources/chat/types/PublishEvent.js"; ++import type * as Hume from "../../../../../index.js"; + + export declare namespace ChatSocket { + export interface Args { + socket: core.ReconnectingWebSocket; + } + +- export type Response = Hume.empathicVoice.SubscribeEvent & { receivedAt: Date }; ++ export type Response = Hume.empathicVoice.SubscribeEvent; + type EventHandlers = { + open?: () => void; + message?: (message: Response) => void; +@@ -37,10 +37,7 @@ export class ChatSocket { + omitUndefined: true, + }); + if (parsedResponse.ok) { +- this.eventHandlers.message?.({ +- ...parsedResponse.value, +- receivedAt: new Date(), +- }); ++ this.eventHandlers.message?.(parsedResponse.value); + } else { + this.eventHandlers.error?.(new Error("Received unknown message type")); + } +@@ -92,86 +89,6 @@ export class ChatSocket { + this.socket.send(JSON.stringify(jsonPayload)); + } + +- /** +- * Send audio input +- */ +- public sendAudioInput(message: Omit): void { +- this.sendPublish({ +- type: "audio_input", +- ...message, +- }); +- } +- +- /** +- * Send session settings +- */ +- public sendSessionSettings(message: Omit = {}): void { +- this.sendPublish({ +- type: "session_settings", +- ...message, +- }); +- } +- +- /** +- * Send assistant input +- */ +- public sendAssistantInput(message: Omit): void { +- this.sendPublish({ +- type: "assistant_input", +- ...message, +- }); +- } +- +- /** +- * Send pause assistant message +- */ +- public pauseAssistant(message: Omit = {}): void { +- this.sendPublish({ +- type: 
"pause_assistant_message", +- ...message, +- }); +- } +- +- /** +- * Send resume assistant message +- */ +- public resumeAssistant(message: Omit = {}): void { +- this.sendPublish({ +- type: "resume_assistant_message", +- ...message, +- }); +- } +- +- /** +- * Send tool response message +- */ +- public sendToolResponseMessage(message: Omit): void { +- this.sendPublish({ +- type: "tool_response", +- ...message, +- }); +- } +- +- /** +- * Send tool error message +- */ +- public sendToolErrorMessage(message: Omit): void { +- this.sendPublish({ +- type: "tool_error", +- ...message, +- }); +- } +- +- /** +- * Send text input +- */ +- public sendUserInput(text: string): void { +- this.sendPublish({ +- type: "user_input", +- text, +- }); +- } +- + /** Connect to the websocket and register event handlers. */ + public connect(): ChatSocket { + this.socket.reconnect(); +@@ -213,13 +130,6 @@ export class ChatSocket { + }); + } + +- /** +- * @deprecated Use waitForOpen() instead +- */ +- public async tillSocketOpen(): Promise { +- return this.waitForOpen(); +- } +- + /** Asserts that the websocket is open. 
*/ + private assertSocketIsOpen(): void { + if (!this.socket) { diff --git a/src/api/resources/empathicVoice/resources/chat/client/index.ts.diff b/src/api/resources/empathicVoice/resources/chat/client/index.ts.diff new file mode 100644 index 00000000..4e0b8ef6 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/chat/client/index.ts.diff @@ -0,0 +1,9 @@ +diff --git a/src/api/resources/empathicVoice/resources/chat/client/index.ts b/src/api/resources/empathicVoice/resources/chat/client/index.ts +index 38d5d5ff..cb0ff5c3 100644 +--- a/src/api/resources/empathicVoice/resources/chat/client/index.ts ++++ b/src/api/resources/empathicVoice/resources/chat/client/index.ts +@@ -1,3 +1 @@ +-/** THIS FILE IS MANUALLY MAINTAINED: see .fernignore */ +-export { ChatSocket } from "./Socket.js"; +-export { Chat } from "./Client.js"; ++export {}; diff --git a/src/api/resources/empathicVoice/resources/chat/index.ts.diff b/src/api/resources/empathicVoice/resources/chat/index.ts.diff new file mode 100644 index 00000000..8c84c118 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/chat/index.ts.diff @@ -0,0 +1,13 @@ +diff --git a/src/api/resources/empathicVoice/resources/chat/index.ts b/src/api/resources/empathicVoice/resources/chat/index.ts +index a2f33dc7..d9adb1af 100644 +--- a/src/api/resources/empathicVoice/resources/chat/index.ts ++++ b/src/api/resources/empathicVoice/resources/chat/index.ts +@@ -1,7 +1,2 @@ +-export * from "./types/index.js"; + export * from "./client/index.js"; +-/** +- * @deprecated Use `Hume.empathicVoice.SubscribeEvent` instead. +- * This type alias will be removed in a future version. 
+- */ +-export type { SubscribeEvent } from "./types/SubscribeEvent.js"; ++export * from "./types/index.js"; diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts index 0345fbab..71089d08 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts @@ -24,7 +24,7 @@ export class ChatGroups { /** * Fetches a paginated list of **Chat Groups**. * - * @param {Hume.empathicVoice.ChatGroupsListChatGroupsRequest} request + * @param {Hume.empathicVoice.ListChatGroupsChatGroupsRequest} request * @param {ChatGroups.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} @@ -38,12 +38,12 @@ export class ChatGroups { * }) */ public async listChatGroups( - request: Hume.empathicVoice.ChatGroupsListChatGroupsRequest = {}, + request: Hume.empathicVoice.ListChatGroupsChatGroupsRequest = {}, requestOptions?: ChatGroups.RequestOptions, ): Promise> { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ChatGroupsListChatGroupsRequest, + request: Hume.empathicVoice.ListChatGroupsChatGroupsRequest, ): Promise> => { const { pageNumber, pageSize, ascendingOrder, configId } = request; const _queryParams: Record = {}; @@ -147,38 +147,32 @@ export class ChatGroups { /** * Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**. * - * @param {string} id - Identifier for a Chat Group. Formatted as a UUID. - * @param {Hume.empathicVoice.ChatGroupsGetChatGroupRequest} request + * @param {Hume.empathicVoice.GetChatGroupChatGroupsRequest} request * @param {ChatGroups.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.chatGroups.getChatGroup("697056f0-6c7e-487d-9bd8-9c19df79f05f", { + * await client.empathicVoice.chatGroups.getChatGroup({ + * id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", * pageNumber: 0, * pageSize: 1, * ascendingOrder: true * }) */ public getChatGroup( - id: string, - request: Hume.empathicVoice.ChatGroupsGetChatGroupRequest = {}, + request: Hume.empathicVoice.GetChatGroupChatGroupsRequest, requestOptions?: ChatGroups.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getChatGroup(id, request, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__getChatGroup(request, requestOptions)); } private async __getChatGroup( - id: string, - request: Hume.empathicVoice.ChatGroupsGetChatGroupRequest = {}, + request: Hume.empathicVoice.GetChatGroupChatGroupsRequest, requestOptions?: ChatGroups.RequestOptions, ): Promise> { - const { status, pageSize, pageNumber, ascendingOrder } = request; + const { id, pageSize, pageNumber, ascendingOrder } = request; const _queryParams: Record = {}; - if (status != null) { - _queryParams.status = status; - } - if (pageSize != null) { _queryParams.page_size = pageSize.toString(); } @@ -266,33 +260,31 @@ export class ChatGroups { /** * Fetches a paginated list of audio for each **Chat** within the specified **Chat Group**. For more details, see our guide on audio reconstruction [here](/docs/speech-to-speech-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). * - * @param {string} id - Identifier for a Chat Group. Formatted as a UUID. - * @param {Hume.empathicVoice.ChatGroupsGetAudioRequest} request + * @param {Hume.empathicVoice.GetAudioChatGroupsRequest} request * @param {ChatGroups.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.chatGroups.getAudio("369846cf-6ad5-404d-905e-a8acb5cdfc78", { + * await client.empathicVoice.chatGroups.getAudio({ + * id: "369846cf-6ad5-404d-905e-a8acb5cdfc78", * pageNumber: 0, * pageSize: 10, * ascendingOrder: true * }) */ public getAudio( - id: string, - request: Hume.empathicVoice.ChatGroupsGetAudioRequest = {}, + request: Hume.empathicVoice.GetAudioChatGroupsRequest, requestOptions?: ChatGroups.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getAudio(id, request, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__getAudio(request, requestOptions)); } private async __getAudio( - id: string, - request: Hume.empathicVoice.ChatGroupsGetAudioRequest = {}, + request: Hume.empathicVoice.GetAudioChatGroupsRequest, requestOptions?: ChatGroups.RequestOptions, ): Promise> { - const { pageNumber, pageSize, ascendingOrder } = request; + const { id, pageNumber, pageSize, ascendingOrder } = request; const _queryParams: Record = {}; if (pageNumber != null) { _queryParams.page_number = pageNumber.toString(); @@ -381,29 +373,28 @@ export class ChatGroups { /** * Fetches a paginated list of **Chat** events associated with a **Chat Group**. * - * @param {string} id - Identifier for a Chat Group. Formatted as a UUID. - * @param {Hume.empathicVoice.ChatGroupsListChatGroupEventsRequest} request + * @param {Hume.empathicVoice.ListChatGroupEventsChatGroupsRequest} request * @param {ChatGroups.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", { + * await client.empathicVoice.chatGroups.listChatGroupEvents({ + * id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", * pageNumber: 0, * pageSize: 3, * ascendingOrder: true * }) */ public async listChatGroupEvents( - id: string, - request: Hume.empathicVoice.ChatGroupsListChatGroupEventsRequest = {}, + request: Hume.empathicVoice.ListChatGroupEventsChatGroupsRequest, requestOptions?: ChatGroups.RequestOptions, ): Promise> { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ChatGroupsListChatGroupEventsRequest, + request: Hume.empathicVoice.ListChatGroupEventsChatGroupsRequest, ): Promise> => { - const { pageSize, pageNumber, ascendingOrder } = request; + const { id, pageSize, pageNumber, ascendingOrder } = request; const _queryParams: Record = {}; if (pageSize != null) { _queryParams.page_size = pageSize.toString(); diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetAudioRequest.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/GetAudioChatGroupsRequest.ts similarity index 86% rename from src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetAudioRequest.ts rename to src/api/resources/empathicVoice/resources/chatGroups/client/requests/GetAudioChatGroupsRequest.ts index 079882d5..733db7f5 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetAudioRequest.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/GetAudioChatGroupsRequest.ts @@ -3,12 +3,15 @@ /** * @example * { + * id: "369846cf-6ad5-404d-905e-a8acb5cdfc78", * pageNumber: 0, * pageSize: 10, * ascendingOrder: true * } */ -export interface ChatGroupsGetAudioRequest { +export interface GetAudioChatGroupsRequest { + /** Identifier for a Chat 
Group. Formatted as a UUID. */ + id: string; /** * Specifies the page number to retrieve, enabling pagination. * diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetChatGroupRequest.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/GetChatGroupChatGroupsRequest.ts similarity index 86% rename from src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetChatGroupRequest.ts rename to src/api/resources/empathicVoice/resources/chatGroups/client/requests/GetChatGroupChatGroupsRequest.ts index 01a33b63..291566cb 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetChatGroupRequest.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/GetChatGroupChatGroupsRequest.ts @@ -3,14 +3,15 @@ /** * @example * { + * id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", * pageNumber: 0, * pageSize: 1, * ascendingOrder: true * } */ -export interface ChatGroupsGetChatGroupRequest { - /** Chat status to apply to the chat. String from the ChatStatus enum. */ - status?: string; +export interface GetChatGroupChatGroupsRequest { + /** Identifier for a Chat Group. Formatted as a UUID. */ + id: string; /** * Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. 
* diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupEventsRequest.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ListChatGroupEventsChatGroupsRequest.ts similarity index 86% rename from src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupEventsRequest.ts rename to src/api/resources/empathicVoice/resources/chatGroups/client/requests/ListChatGroupEventsChatGroupsRequest.ts index ff10d0b8..dcc7f3c6 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupEventsRequest.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ListChatGroupEventsChatGroupsRequest.ts @@ -3,12 +3,15 @@ /** * @example * { + * id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", * pageNumber: 0, * pageSize: 3, * ascendingOrder: true * } */ -export interface ChatGroupsListChatGroupEventsRequest { +export interface ListChatGroupEventsChatGroupsRequest { + /** Identifier for a Chat Group. Formatted as a UUID. */ + id: string; /** * Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. 
* diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupsRequest.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ListChatGroupsChatGroupsRequest.ts similarity index 96% rename from src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupsRequest.ts rename to src/api/resources/empathicVoice/resources/chatGroups/client/requests/ListChatGroupsChatGroupsRequest.ts index c7cf7aec..26d5061b 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupsRequest.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/ListChatGroupsChatGroupsRequest.ts @@ -9,7 +9,7 @@ * configId: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" * } */ -export interface ChatGroupsListChatGroupsRequest { +export interface ListChatGroupsChatGroupsRequest { /** * Specifies the page number to retrieve, enabling pagination. * diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/index.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/index.ts index 82734a25..4f331668 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/requests/index.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/requests/index.ts @@ -1,4 +1,4 @@ -export type { ChatGroupsGetAudioRequest } from "./ChatGroupsGetAudioRequest.js"; -export type { ChatGroupsGetChatGroupRequest } from "./ChatGroupsGetChatGroupRequest.js"; -export type { ChatGroupsListChatGroupEventsRequest } from "./ChatGroupsListChatGroupEventsRequest.js"; -export type { ChatGroupsListChatGroupsRequest } from "./ChatGroupsListChatGroupsRequest.js"; +export type { GetAudioChatGroupsRequest } from "./GetAudioChatGroupsRequest.js"; +export type { GetChatGroupChatGroupsRequest } from "./GetChatGroupChatGroupsRequest.js"; +export type { ListChatGroupEventsChatGroupsRequest } from 
"./ListChatGroupEventsChatGroupsRequest.js"; +export type { ListChatGroupsChatGroupsRequest } from "./ListChatGroupsChatGroupsRequest.js"; diff --git a/src/api/resources/empathicVoice/resources/chats/client/Client.ts b/src/api/resources/empathicVoice/resources/chats/client/Client.ts index 1939868f..4acd53e0 100644 --- a/src/api/resources/empathicVoice/resources/chats/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/chats/client/Client.ts @@ -24,7 +24,7 @@ export class Chats { /** * Fetches a paginated list of **Chats**. * - * @param {Hume.empathicVoice.ChatsListChatsRequest} request + * @param {Hume.empathicVoice.ListChatsChatsRequest} request * @param {Chats.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} @@ -37,14 +37,14 @@ export class Chats { * }) */ public async listChats( - request: Hume.empathicVoice.ChatsListChatsRequest = {}, + request: Hume.empathicVoice.ListChatsChatsRequest = {}, requestOptions?: Chats.RequestOptions, ): Promise> { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ChatsListChatsRequest, + request: Hume.empathicVoice.ListChatsChatsRequest, ): Promise> => { - const { pageNumber, pageSize, ascendingOrder, configId, status } = request; + const { pageNumber, pageSize, ascendingOrder, configId } = request; const _queryParams: Record = {}; if (pageNumber != null) { _queryParams.page_number = pageNumber.toString(); @@ -58,9 +58,6 @@ export class Chats { if (configId != null) { _queryParams.config_id = configId; } - if (status != null) { - _queryParams.status = status; - } const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -149,29 +146,28 @@ export class Chats { /** * Fetches a paginated list of **Chat** events. * - * @param {string} id - Identifier for a Chat. Formatted as a UUID. 
- * @param {Hume.empathicVoice.ChatsListChatEventsRequest} request + * @param {Hume.empathicVoice.ListChatEventsChatsRequest} request * @param {Chats.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.chats.listChatEvents("470a49f6-1dec-4afe-8b61-035d3b2d63b0", { + * await client.empathicVoice.chats.listChatEvents({ + * id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0", * pageNumber: 0, * pageSize: 3, * ascendingOrder: true * }) */ public async listChatEvents( - id: string, - request: Hume.empathicVoice.ChatsListChatEventsRequest = {}, + request: Hume.empathicVoice.ListChatEventsChatsRequest, requestOptions?: Chats.RequestOptions, ): Promise> { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ChatsListChatEventsRequest, + request: Hume.empathicVoice.ListChatEventsChatsRequest, ): Promise> => { - const { pageSize, pageNumber, ascendingOrder } = request; + const { id, pageSize, pageNumber, ascendingOrder } = request; const _queryParams: Record = {}; if (pageSize != null) { _queryParams.page_size = pageSize.toString(); @@ -270,25 +266,28 @@ export class Chats { /** * Fetches the audio of a previous **Chat**. For more details, see our guide on audio reconstruction [here](/docs/speech-to-speech-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). * - * @param {string} id - Identifier for a chat. Formatted as a UUID. + * @param {Hume.empathicVoice.GetAudioChatsRequest} request * @param {Chats.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.chats.getAudio("470a49f6-1dec-4afe-8b61-035d3b2d63b0") + * await client.empathicVoice.chats.getAudio({ + * id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0" + * }) */ public getAudio( - id: string, + request: Hume.empathicVoice.GetAudioChatsRequest, requestOptions?: Chats.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getAudio(id, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__getAudio(request, requestOptions)); } private async __getAudio( - id: string, + request: Hume.empathicVoice.GetAudioChatsRequest, requestOptions?: Chats.RequestOptions, ): Promise> { + const { id } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), diff --git a/src/api/resources/empathicVoice/resources/chats/client/requests/GetAudioChatsRequest.ts b/src/api/resources/empathicVoice/resources/chats/client/requests/GetAudioChatsRequest.ts new file mode 100644 index 00000000..4c775dff --- /dev/null +++ b/src/api/resources/empathicVoice/resources/chats/client/requests/GetAudioChatsRequest.ts @@ -0,0 +1,12 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0" + * } + */ +export interface GetAudioChatsRequest { + /** Identifier for a chat. Formatted as a UUID. 
*/ + id: string; +} diff --git a/src/api/resources/empathicVoice/resources/chats/client/requests/ChatsListChatEventsRequest.ts b/src/api/resources/empathicVoice/resources/chats/client/requests/ListChatEventsChatsRequest.ts similarity index 87% rename from src/api/resources/empathicVoice/resources/chats/client/requests/ChatsListChatEventsRequest.ts rename to src/api/resources/empathicVoice/resources/chats/client/requests/ListChatEventsChatsRequest.ts index 0ee4383b..6e979478 100644 --- a/src/api/resources/empathicVoice/resources/chats/client/requests/ChatsListChatEventsRequest.ts +++ b/src/api/resources/empathicVoice/resources/chats/client/requests/ListChatEventsChatsRequest.ts @@ -3,12 +3,15 @@ /** * @example * { + * id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0", * pageNumber: 0, * pageSize: 3, * ascendingOrder: true * } */ -export interface ChatsListChatEventsRequest { +export interface ListChatEventsChatsRequest { + /** Identifier for a Chat. Formatted as a UUID. */ + id: string; /** * Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. * diff --git a/src/api/resources/empathicVoice/resources/chats/client/requests/ChatsListChatsRequest.ts b/src/api/resources/empathicVoice/resources/chats/client/requests/ListChatsChatsRequest.ts similarity index 89% rename from src/api/resources/empathicVoice/resources/chats/client/requests/ChatsListChatsRequest.ts rename to src/api/resources/empathicVoice/resources/chats/client/requests/ListChatsChatsRequest.ts index 0133f5c9..872c9ce8 100644 --- a/src/api/resources/empathicVoice/resources/chats/client/requests/ChatsListChatsRequest.ts +++ b/src/api/resources/empathicVoice/resources/chats/client/requests/ListChatsChatsRequest.ts @@ -8,7 +8,7 @@ * ascendingOrder: true * } */ -export interface ChatsListChatsRequest { +export interface ListChatsChatsRequest { /** * Specifies the page number to retrieve, enabling pagination. 
* @@ -25,6 +25,4 @@ export interface ChatsListChatsRequest { ascendingOrder?: boolean; /** Filter to only include chats that used this config. */ configId?: string; - /** Chat status to apply to the chat. String from the ChatStatus enum. */ - status?: string; } diff --git a/src/api/resources/empathicVoice/resources/chats/client/requests/index.ts b/src/api/resources/empathicVoice/resources/chats/client/requests/index.ts index fa42e9bd..786d8aec 100644 --- a/src/api/resources/empathicVoice/resources/chats/client/requests/index.ts +++ b/src/api/resources/empathicVoice/resources/chats/client/requests/index.ts @@ -1,2 +1,3 @@ -export type { ChatsListChatEventsRequest } from "./ChatsListChatEventsRequest.js"; -export type { ChatsListChatsRequest } from "./ChatsListChatsRequest.js"; +export type { GetAudioChatsRequest } from "./GetAudioChatsRequest.js"; +export type { ListChatEventsChatsRequest } from "./ListChatEventsChatsRequest.js"; +export type { ListChatsChatsRequest } from "./ListChatsChatsRequest.js"; diff --git a/src/api/resources/empathicVoice/resources/configs/client/Client.ts b/src/api/resources/empathicVoice/resources/configs/client/Client.ts index 632a8127..6fb79386 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/Client.ts @@ -26,7 +26,7 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {Hume.empathicVoice.ConfigsListConfigsRequest} request + * @param {Hume.empathicVoice.ListConfigsConfigsRequest} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} @@ -38,12 +38,12 @@ export class Configs { * }) */ public async listConfigs( - request: Hume.empathicVoice.ConfigsListConfigsRequest = {}, + request: Hume.empathicVoice.ListConfigsConfigsRequest = {}, requestOptions?: Configs.RequestOptions, ): Promise> { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ConfigsListConfigsRequest, + request: Hume.empathicVoice.ListConfigsConfigsRequest, ): Promise> => { const { pageNumber, pageSize, restrictToMostRecent, name } = request; const _queryParams: Record = {}; @@ -281,25 +281,25 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {string} id - Identifier for a Config. Formatted as a UUID. - * @param {Hume.empathicVoice.ConfigsListConfigVersionsRequest} request + * @param {Hume.empathicVoice.ListConfigVersionsConfigsRequest} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3") + * await client.empathicVoice.configs.listConfigVersions({ + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" + * }) */ public async listConfigVersions( - id: string, - request: Hume.empathicVoice.ConfigsListConfigVersionsRequest = {}, + request: Hume.empathicVoice.ListConfigVersionsConfigsRequest, requestOptions?: Configs.RequestOptions, ): Promise> { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ConfigsListConfigVersionsRequest, + request: Hume.empathicVoice.ListConfigVersionsConfigsRequest, ): Promise> => { - const { pageNumber, pageSize, restrictToMostRecent } = request; + const { id, pageNumber, pageSize, restrictToMostRecent } = request; const _queryParams: Record = {}; if (pageNumber != null) { _queryParams.page_number = pageNumber.toString(); @@ -400,14 +400,14 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {string} id - Identifier for a Config. Formatted as a UUID. * @param {Hume.empathicVoice.PostedConfigVersion} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.configs.createConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", { + * await client.empathicVoice.configs.createConfigVersion({ + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", * versionDescription: "This is an updated version of the Weather Assistant Config.", * eviVersion: "3", * prompt: { @@ -443,18 +443,17 @@ export class Configs { * }) */ public createConfigVersion( - id: string, request: Hume.empathicVoice.PostedConfigVersion, requestOptions?: Configs.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__createConfigVersion(id, request, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__createConfigVersion(request, requestOptions)); } private async __createConfigVersion( - id: string, request: Hume.empathicVoice.PostedConfigVersion, requestOptions?: Configs.RequestOptions, ): Promise> { + const { id, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -471,7 +470,7 @@ export class Configs { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedConfigVersion.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedConfigVersion.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), @@ -538,22 +537,28 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {string} id - Identifier for a Config. Formatted as a UUID. + * @param {Hume.empathicVoice.DeleteConfigConfigsRequest} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.configs.deleteConfig("1b60e1a0-cc59-424a-8d2c-189d354db3f3") + * await client.empathicVoice.configs.deleteConfig({ + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" + * }) */ - public deleteConfig(id: string, requestOptions?: Configs.RequestOptions): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__deleteConfig(id, requestOptions)); + public deleteConfig( + request: Hume.empathicVoice.DeleteConfigConfigsRequest, + requestOptions?: Configs.RequestOptions, + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__deleteConfig(request, requestOptions)); } private async __deleteConfig( - id: string, + request: Hume.empathicVoice.DeleteConfigConfigsRequest, requestOptions?: Configs.RequestOptions, ): Promise> { + const { id } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -622,30 +627,29 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {string} id - Identifier for a Config. Formatted as a UUID. * @param {Hume.empathicVoice.PostedConfigName} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.configs.updateConfigName("1b60e1a0-cc59-424a-8d2c-189d354db3f3", { + * await client.empathicVoice.configs.updateConfigName({ + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", * name: "Updated Weather Assistant Config Name" * }) */ public updateConfigName( - id: string, request: Hume.empathicVoice.PostedConfigName, requestOptions?: Configs.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__updateConfigName(id, request, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__updateConfigName(request, requestOptions)); } private async __updateConfigName( - id: string, request: Hume.empathicVoice.PostedConfigName, requestOptions?: Configs.RequestOptions, ): Promise> { + const { id, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -662,7 +666,7 @@ export class Configs { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedConfigName.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedConfigName.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), @@ -721,32 +725,29 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {string} id - Identifier for a Config. Formatted as a UUID. - * @param {number} version - Version number for a Config. - * - * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. 
- * - * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. + * @param {Hume.empathicVoice.GetConfigVersionConfigsRequest} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.configs.getConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1) + * await client.empathicVoice.configs.getConfigVersion({ + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + * version: 1 + * }) */ public getConfigVersion( - id: string, - version: number, + request: Hume.empathicVoice.GetConfigVersionConfigsRequest, requestOptions?: Configs.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getConfigVersion(id, version, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__getConfigVersion(request, requestOptions)); } private async __getConfigVersion( - id: string, - version: number, + request: Hume.empathicVoice.GetConfigVersionConfigsRequest, requestOptions?: Configs.RequestOptions, ): Promise> { + const { id, version } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -826,32 +827,29 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {string} id - Identifier for a Config. Formatted as a UUID. - * @param {number} version - Version number for a Config. - * - * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. 
- * - * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. + * @param {Hume.empathicVoice.DeleteConfigVersionConfigsRequest} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.configs.deleteConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1) + * await client.empathicVoice.configs.deleteConfigVersion({ + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + * version: 1 + * }) */ public deleteConfigVersion( - id: string, - version: number, + request: Hume.empathicVoice.DeleteConfigVersionConfigsRequest, requestOptions?: Configs.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__deleteConfigVersion(id, version, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__deleteConfigVersion(request, requestOptions)); } private async __deleteConfigVersion( - id: string, - version: number, + request: Hume.empathicVoice.DeleteConfigVersionConfigsRequest, requestOptions?: Configs.RequestOptions, ): Promise> { + const { id, version } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -922,39 +920,30 @@ export class Configs { * * For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/speech-to-speech-evi/configuration). * - * @param {string} id - Identifier for a Config. Formatted as a UUID. - * @param {number} version - Version number for a Config. - * - * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. 
- * - * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. * @param {Hume.empathicVoice.PostedConfigVersionDescription} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.configs.updateConfigDescription("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1, { + * await client.empathicVoice.configs.updateConfigDescription({ + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + * version: 1, * versionDescription: "This is an updated version_description." * }) */ public updateConfigDescription( - id: string, - version: number, - request: Hume.empathicVoice.PostedConfigVersionDescription = {}, + request: Hume.empathicVoice.PostedConfigVersionDescription, requestOptions?: Configs.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise( - this.__updateConfigDescription(id, version, request, requestOptions), - ); + return core.HttpResponsePromise.fromPromise(this.__updateConfigDescription(request, requestOptions)); } private async __updateConfigDescription( - id: string, - version: number, - request: Hume.empathicVoice.PostedConfigVersionDescription = {}, + request: Hume.empathicVoice.PostedConfigVersionDescription, requestOptions?: Configs.RequestOptions, ): Promise> { + const { id, version, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -971,7 +960,7 @@ export class Configs { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedConfigVersionDescription.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedConfigVersionDescription.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", 
omitUndefined: true, }), diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/DeleteConfigConfigsRequest.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/DeleteConfigConfigsRequest.ts new file mode 100644 index 00000000..949355c7 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/DeleteConfigConfigsRequest.ts @@ -0,0 +1,12 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" + * } + */ +export interface DeleteConfigConfigsRequest { + /** Identifier for a Config. Formatted as a UUID. */ + id: string; +} diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/DeleteConfigVersionConfigsRequest.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/DeleteConfigVersionConfigsRequest.ts new file mode 100644 index 00000000..4b49a5d5 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/DeleteConfigVersionConfigsRequest.ts @@ -0,0 +1,21 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + * version: 1 + * } + */ +export interface DeleteConfigVersionConfigsRequest { + /** Identifier for a Config. Formatted as a UUID. */ + id: string; + /** + * Version number for a Config. + * + * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. 
+ */ + version: number; +} diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/GetConfigVersionConfigsRequest.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/GetConfigVersionConfigsRequest.ts new file mode 100644 index 00000000..27863830 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/GetConfigVersionConfigsRequest.ts @@ -0,0 +1,21 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + * version: 1 + * } + */ +export interface GetConfigVersionConfigsRequest { + /** Identifier for a Config. Formatted as a UUID. */ + id: string; + /** + * Version number for a Config. + * + * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. 
+ */ + version: number; +} diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/ConfigsListConfigVersionsRequest.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/ListConfigVersionsConfigsRequest.ts similarity index 83% rename from src/api/resources/empathicVoice/resources/configs/client/requests/ConfigsListConfigVersionsRequest.ts rename to src/api/resources/empathicVoice/resources/configs/client/requests/ListConfigVersionsConfigsRequest.ts index 90e3ad62..fc63b11c 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/ConfigsListConfigVersionsRequest.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/ListConfigVersionsConfigsRequest.ts @@ -2,9 +2,13 @@ /** * @example - * {} + * { + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3" + * } */ -export interface ConfigsListConfigVersionsRequest { +export interface ListConfigVersionsConfigsRequest { + /** Identifier for a Config. Formatted as a UUID. */ + id: string; /** * Specifies the page number to retrieve, enabling pagination. * diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/ConfigsListConfigsRequest.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/ListConfigsConfigsRequest.ts similarity index 96% rename from src/api/resources/empathicVoice/resources/configs/client/requests/ConfigsListConfigsRequest.ts rename to src/api/resources/empathicVoice/resources/configs/client/requests/ListConfigsConfigsRequest.ts index bb17701e..fecf9b5c 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/ConfigsListConfigsRequest.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/ListConfigsConfigsRequest.ts @@ -7,7 +7,7 @@ * pageSize: 1 * } */ -export interface ConfigsListConfigsRequest { +export interface ListConfigsConfigsRequest { /** * Specifies the page number to retrieve, enabling pagination. 
* diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts index 99d29198..2ed13fb7 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts @@ -38,14 +38,14 @@ import type * as Hume from "../../../../../../index.js"; */ export interface PostedConfig { /** List of built-in tools associated with this Config. */ - builtinTools?: (Hume.empathicVoice.PostedBuiltinTool | undefined)[]; + builtinTools?: (Hume.empathicVoice.PostedBuiltinTool | null)[] | null; /** * The eLLM setup associated with this Config. * * Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. */ - ellmModel?: Hume.empathicVoice.PostedEllmModel; - eventMessages?: Hume.empathicVoice.PostedEventMessageSpecs; + ellmModel?: Hume.empathicVoice.PostedEllmModel | null; + eventMessages?: Hume.empathicVoice.PostedEventMessageSpecs | null; /** EVI version to use. Only versions `3` and `4-mini` are supported. */ eviVersion: string; /** @@ -53,19 +53,19 @@ export interface PostedConfig { * * This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. */ - languageModel?: Hume.empathicVoice.PostedLanguageModel; + languageModel?: Hume.empathicVoice.PostedLanguageModel | null; /** Name applied to all versions of a particular Config. */ name: string; /** Configures nudges, brief audio prompts that can guide conversations when users pause or need encouragement to continue speaking. 
Nudges help create more natural, flowing interactions by providing gentle conversational cues. */ - nudges?: Hume.empathicVoice.PostedNudgeSpec; - prompt?: Hume.empathicVoice.PostedConfigPromptSpec; - timeouts?: Hume.empathicVoice.PostedTimeoutSpecs; + nudges?: Hume.empathicVoice.PostedNudgeSpec | null; + prompt?: Hume.empathicVoice.PostedConfigPromptSpec | null; + timeouts?: Hume.empathicVoice.PostedTimeoutSpecs | null; /** List of user-defined tools associated with this Config. */ - tools?: (Hume.empathicVoice.PostedUserDefinedToolSpec | undefined)[]; + tools?: (Hume.empathicVoice.PostedUserDefinedToolSpec | null)[] | null; /** An optional description of the Config version. */ - versionDescription?: string; + versionDescription?: string | null; /** A voice specification associated with this Config. */ voice?: Hume.empathicVoice.VoiceRef; /** Webhook config specifications for each subscriber. */ - webhooks?: (Hume.empathicVoice.PostedWebhookSpec | undefined)[]; + webhooks?: (Hume.empathicVoice.PostedWebhookSpec | null)[] | null; } diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts index e6fc41e6..de25450e 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts @@ -3,10 +3,13 @@ /** * @example * { + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", * name: "Updated Weather Assistant Config Name" * } */ export interface PostedConfigName { + /** Identifier for a Config. Formatted as a UUID. */ + id: string; /** Name applied to all versions of a particular Config. 
*/ name: string; } diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts index dd53d01b..2f9d0e67 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts @@ -5,6 +5,7 @@ import type * as Hume from "../../../../../../index.js"; /** * @example * { + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", * versionDescription: "This is an updated version of the Weather Assistant Config.", * eviVersion: "3", * prompt: { @@ -40,15 +41,17 @@ import type * as Hume from "../../../../../../index.js"; * } */ export interface PostedConfigVersion { + /** Identifier for a Config. Formatted as a UUID. */ + id: string; /** List of built-in tools associated with this Config version. */ - builtinTools?: (Hume.empathicVoice.PostedBuiltinTool | undefined)[]; + builtinTools?: (Hume.empathicVoice.PostedBuiltinTool | null)[] | null; /** * The eLLM setup associated with this Config version. * * Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. */ - ellmModel?: Hume.empathicVoice.PostedEllmModel; - eventMessages?: Hume.empathicVoice.PostedEventMessageSpecs; + ellmModel?: Hume.empathicVoice.PostedEllmModel | null; + eventMessages?: Hume.empathicVoice.PostedEventMessageSpecs | null; /** The version of the EVI used with this config. */ eviVersion: string; /** @@ -56,16 +59,16 @@ export interface PostedConfigVersion { * * This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. 
*/ - languageModel?: Hume.empathicVoice.PostedLanguageModel; - nudges?: Hume.empathicVoice.PostedNudgeSpec; - prompt?: Hume.empathicVoice.PostedConfigPromptSpec; - timeouts?: Hume.empathicVoice.PostedTimeoutSpecs; + languageModel?: Hume.empathicVoice.PostedLanguageModel | null; + nudges?: Hume.empathicVoice.PostedNudgeSpec | null; + prompt?: Hume.empathicVoice.PostedConfigPromptSpec | null; + timeouts?: Hume.empathicVoice.PostedTimeoutSpecs | null; /** List of user-defined tools associated with this Config version. */ - tools?: (Hume.empathicVoice.PostedUserDefinedToolSpec | undefined)[]; + tools?: (Hume.empathicVoice.PostedUserDefinedToolSpec | null)[] | null; /** An optional description of the Config version. */ - versionDescription?: string; + versionDescription?: string | null; /** A voice specification associated with this Config version. */ voice?: Hume.empathicVoice.VoiceRef; /** Webhook config specifications for each subscriber. */ - webhooks?: (Hume.empathicVoice.PostedWebhookSpec | undefined)[]; + webhooks?: (Hume.empathicVoice.PostedWebhookSpec | null)[] | null; } diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts index 3f7265bb..2dafe916 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts @@ -3,10 +3,22 @@ /** * @example * { + * id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + * version: 1, * versionDescription: "This is an updated version_description." * } */ export interface PostedConfigVersionDescription { + /** Identifier for a Config. Formatted as a UUID. */ + id: string; + /** + * Version number for a Config. + * + * Configs, Prompts, Custom Voices, and Tools are versioned. 
This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. + */ + version: number; /** An optional description of the Config version. */ - versionDescription?: string; + versionDescription?: string | null; } diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/index.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/index.ts index 13301aad..f06fabbf 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/index.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/index.ts @@ -1,5 +1,8 @@ -export type { ConfigsListConfigsRequest } from "./ConfigsListConfigsRequest.js"; -export type { ConfigsListConfigVersionsRequest } from "./ConfigsListConfigVersionsRequest.js"; +export type { DeleteConfigConfigsRequest } from "./DeleteConfigConfigsRequest.js"; +export type { DeleteConfigVersionConfigsRequest } from "./DeleteConfigVersionConfigsRequest.js"; +export type { GetConfigVersionConfigsRequest } from "./GetConfigVersionConfigsRequest.js"; +export type { ListConfigsConfigsRequest } from "./ListConfigsConfigsRequest.js"; +export type { ListConfigVersionsConfigsRequest } from "./ListConfigVersionsConfigsRequest.js"; export type { PostedConfig } from "./PostedConfig.js"; export type { PostedConfigName } from "./PostedConfigName.js"; export type { PostedConfigVersion } from "./PostedConfigVersion.js"; diff --git a/src/api/resources/empathicVoice/resources/controlPlane/client/Client.ts.diff b/src/api/resources/empathicVoice/resources/controlPlane/client/Client.ts.diff new file mode 100644 index 00000000..8e07826b --- /dev/null +++ b/src/api/resources/empathicVoice/resources/controlPlane/client/Client.ts.diff @@ -0,0 +1,80 @@ +diff --git 
a/src/api/resources/empathicVoice/resources/controlPlane/client/Client.ts b/src/api/resources/empathicVoice/resources/controlPlane/client/Client.ts +index e092f572..2a2513b3 100644 +--- a/src/api/resources/empathicVoice/resources/controlPlane/client/Client.ts ++++ b/src/api/resources/empathicVoice/resources/controlPlane/client/Client.ts +@@ -36,30 +36,31 @@ export class ControlPlane { + /** + * Send a message to a specific chat. + * +- * @param {string} chatId +- * @param {Hume.empathicVoice.ControlPlanePublishEvent} request ++ * @param {Hume.empathicVoice.SendControlPlaneRequest} request + * @param {ControlPlane.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Hume.empathicVoice.UnprocessableEntityError} + * + * @example +- * await client.empathicVoice.controlPlane.send("chat_id", { +- * type: "session_settings" ++ * await client.empathicVoice.controlPlane.send({ ++ * chatId: "chat_id", ++ * body: { ++ * type: "session_settings" ++ * } + * }) + */ + public send( +- chatId: string, +- request: Hume.empathicVoice.ControlPlanePublishEvent, ++ request: Hume.empathicVoice.SendControlPlaneRequest, + requestOptions?: ControlPlane.RequestOptions, + ): core.HttpResponsePromise { +- return core.HttpResponsePromise.fromPromise(this.__send(chatId, request, requestOptions)); ++ return core.HttpResponsePromise.fromPromise(this.__send(request, requestOptions)); + } + + private async __send( +- chatId: string, +- request: Hume.empathicVoice.ControlPlanePublishEvent, ++ request: Hume.empathicVoice.SendControlPlaneRequest, + requestOptions?: ControlPlane.RequestOptions, + ): Promise> { ++ const { chatId, body: _body } = request; + const _headers: core.Fetcher.Args["headers"] = mergeHeaders( + this._options?.headers, + mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), +@@ -76,7 +77,7 @@ export class ControlPlane { + contentType: "application/json", + queryParameters: requestOptions?.queryParams, + requestType: 
"json", +- body: serializers.empathicVoice.ControlPlanePublishEvent.jsonOrThrow(request, { ++ body: serializers.empathicVoice.ControlPlanePublishEvent.jsonOrThrow(_body, { + unrecognizedObjectKeys: "strip", + omitUndefined: true, + }), +@@ -144,7 +145,7 @@ export class ControlPlane { + url: core.url.join( + (await core.Supplier.get(this._options.baseUrl)) ?? + ((await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Prod).evi, +- `/chat/${core.url.encodePathParam(chat_id)}/connect`, ++ `/chat/${core.url.encodePathParam(chatId)}/connect`, + ), + protocols: [], + queryParameters: _queryParams, +@@ -153,12 +154,9 @@ export class ControlPlane { + }); + return new ControlPlaneSocket({ socket }); + } +- protected async _getCustomAuthorizationHeaders(): Promise> { +- const apiKeyValue = core.Supplier.get(this._options.apiKey); +- // This `authHeaderValue` is manually added as if you don't provide it it will +- // be omitted from the headers which means it won't reach the logic in ws.ts that +- // extracts values from the headers and adds them to query parameters. 
+- const authHeaderValue = core.Supplier.get(this._options.headers?.authorization); +- return { "X-Hume-Api-Key": apiKeyValue, Authorization: authHeaderValue }; ++ ++ protected async _getCustomAuthorizationHeaders(): Promise> { ++ const apiKeyValue = await core.Supplier.get(this._options.apiKey); ++ return { "X-Hume-Api-Key": apiKeyValue }; + } + } diff --git a/src/api/resources/empathicVoice/resources/controlPlane/client/index.ts b/src/api/resources/empathicVoice/resources/controlPlane/client/index.ts index cb0ff5c3..195f9aa8 100644 --- a/src/api/resources/empathicVoice/resources/controlPlane/client/index.ts +++ b/src/api/resources/empathicVoice/resources/controlPlane/client/index.ts @@ -1 +1 @@ -export {}; +export * from "./requests/index.js"; diff --git a/src/api/resources/empathicVoice/resources/controlPlane/client/requests/SendControlPlaneRequest.ts b/src/api/resources/empathicVoice/resources/controlPlane/client/requests/SendControlPlaneRequest.ts new file mode 100644 index 00000000..683091d1 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/controlPlane/client/requests/SendControlPlaneRequest.ts @@ -0,0 +1,17 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../../../index.js"; + +/** + * @example + * { + * chatId: "chat_id", + * body: { + * type: "session_settings" + * } + * } + */ +export interface SendControlPlaneRequest { + chatId: string; + body: Hume.empathicVoice.ControlPlanePublishEvent; +} diff --git a/src/api/resources/empathicVoice/resources/controlPlane/client/requests/index.ts b/src/api/resources/empathicVoice/resources/controlPlane/client/requests/index.ts new file mode 100644 index 00000000..e7088aa3 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/controlPlane/client/requests/index.ts @@ -0,0 +1 @@ +export type { SendControlPlaneRequest } from "./SendControlPlaneRequest.js"; diff --git a/src/api/resources/empathicVoice/resources/index.ts b/src/api/resources/empathicVoice/resources/index.ts index 97789630..0b89a3b2 100644 --- a/src/api/resources/empathicVoice/resources/index.ts +++ b/src/api/resources/empathicVoice/resources/index.ts @@ -6,6 +6,7 @@ export * from "./chats/client/requests/index.js"; export * as chats from "./chats/index.js"; export * from "./configs/client/requests/index.js"; export * as configs from "./configs/index.js"; +export * from "./controlPlane/client/requests/index.js"; export * as controlPlane from "./controlPlane/index.js"; export * from "./prompts/client/requests/index.js"; export * as prompts from "./prompts/index.js"; diff --git a/src/api/resources/empathicVoice/resources/prompts/client/Client.ts b/src/api/resources/empathicVoice/resources/prompts/client/Client.ts index 63debd8e..2c3d9ec4 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/Client.ts @@ -26,7 +26,7 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting your system prompt. 
* - * @param {Hume.empathicVoice.PromptsListPromptsRequest} request + * @param {Hume.empathicVoice.ListPromptsPromptsRequest} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} @@ -38,12 +38,12 @@ export class Prompts { * }) */ public async listPrompts( - request: Hume.empathicVoice.PromptsListPromptsRequest = {}, + request: Hume.empathicVoice.ListPromptsPromptsRequest = {}, requestOptions?: Prompts.RequestOptions, - ): Promise> { + ): Promise> { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.PromptsListPromptsRequest, + request: Hume.empathicVoice.ListPromptsPromptsRequest, ): Promise> => { const { pageNumber, pageSize, restrictToMostRecent, name } = request; const _queryParams: Record = {}; @@ -132,7 +132,7 @@ export class Prompts { ); let _offset = request?.pageNumber != null ? request?.pageNumber : 0; const dataWithRawResponse = await list(request).withRawResponse(); - return new core.Page({ + return new core.Page({ response: dataWithRawResponse.data, rawResponse: dataWithRawResponse.rawResponse, hasNextPage: (response) => (response?.promptsPage ?? 
[]).length > 0, @@ -163,14 +163,14 @@ export class Prompts { public createPrompt( request: Hume.empathicVoice.PostedPrompt, requestOptions?: Prompts.RequestOptions, - ): core.HttpResponsePromise { + ): core.HttpResponsePromise { return core.HttpResponsePromise.fromPromise(this.__createPrompt(request, requestOptions)); } private async __createPrompt( request: Hume.empathicVoice.PostedPrompt, requestOptions?: Prompts.RequestOptions, - ): Promise> { + ): Promise> { const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -254,29 +254,28 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting your system prompt. * - * @param {string} id - Identifier for a Prompt. Formatted as a UUID. - * @param {Hume.empathicVoice.PromptsListPromptVersionsRequest} request + * @param {Hume.empathicVoice.ListPromptVersionsPromptsRequest} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.prompts.listPromptVersions("af699d45-2985-42cc-91b9-af9e5da3bac5") + * await client.empathicVoice.prompts.listPromptVersions({ + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5" + * }) */ public listPromptVersions( - id: string, - request: Hume.empathicVoice.PromptsListPromptVersionsRequest = {}, + request: Hume.empathicVoice.ListPromptVersionsPromptsRequest, requestOptions?: Prompts.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__listPromptVersions(id, request, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__listPromptVersions(request, requestOptions)); } private async __listPromptVersions( - id: string, - request: Hume.empathicVoice.PromptsListPromptVersionsRequest = {}, + request: Hume.empathicVoice.ListPromptVersionsPromptsRequest, requestOptions?: Prompts.RequestOptions, ): Promise> { - const { pageNumber, pageSize, restrictToMostRecent } = request; + const { id, pageNumber, pageSize, restrictToMostRecent } = request; const _queryParams: Record = {}; if (pageNumber != null) { _queryParams.page_number = pageNumber.toString(); @@ -367,31 +366,30 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting your system prompt. * - * @param {string} id - Identifier for a Prompt. Formatted as a UUID. * @param {Hume.empathicVoice.PostedPromptVersion} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.prompts.createPromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", { + * await client.empathicVoice.prompts.createPromptVersion({ + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * text: "You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", * versionDescription: "This is an updated version of the Weather Assistant Prompt." * }) */ public createPromptVersion( - id: string, request: Hume.empathicVoice.PostedPromptVersion, requestOptions?: Prompts.RequestOptions, - ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__createPromptVersion(id, request, requestOptions)); + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__createPromptVersion(request, requestOptions)); } private async __createPromptVersion( - id: string, request: Hume.empathicVoice.PostedPromptVersion, requestOptions?: Prompts.RequestOptions, - ): Promise> { + ): Promise> { + const { id, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -408,7 +406,7 @@ export class Prompts { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedPromptVersion.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedPromptVersion.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), @@ -475,22 +473,28 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting 
your system prompt. * - * @param {string} id - Identifier for a Prompt. Formatted as a UUID. + * @param {Hume.empathicVoice.DeletePromptPromptsRequest} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.prompts.deletePrompt("af699d45-2985-42cc-91b9-af9e5da3bac5") + * await client.empathicVoice.prompts.deletePrompt({ + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5" + * }) */ - public deletePrompt(id: string, requestOptions?: Prompts.RequestOptions): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__deletePrompt(id, requestOptions)); + public deletePrompt( + request: Hume.empathicVoice.DeletePromptPromptsRequest, + requestOptions?: Prompts.RequestOptions, + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__deletePrompt(request, requestOptions)); } private async __deletePrompt( - id: string, + request: Hume.empathicVoice.DeletePromptPromptsRequest, requestOptions?: Prompts.RequestOptions, ): Promise> { + const { id } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -559,30 +563,29 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting your system prompt. * - * @param {string} id - Identifier for a Prompt. Formatted as a UUID. * @param {Hume.empathicVoice.PostedPromptName} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. 
* * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.prompts.updatePromptName("af699d45-2985-42cc-91b9-af9e5da3bac5", { + * await client.empathicVoice.prompts.updatePromptName({ + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * name: "Updated Weather Assistant Prompt Name" * }) */ public updatePromptName( - id: string, request: Hume.empathicVoice.PostedPromptName, requestOptions?: Prompts.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__updatePromptName(id, request, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__updatePromptName(request, requestOptions)); } private async __updatePromptName( - id: string, request: Hume.empathicVoice.PostedPromptName, requestOptions?: Prompts.RequestOptions, ): Promise> { + const { id, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -599,7 +602,7 @@ export class Prompts { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedPromptName.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedPromptName.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), @@ -658,32 +661,29 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting your system prompt. * - * @param {string} id - Identifier for a Prompt. Formatted as a UUID. - * @param {number} version - Version number for a Prompt. - * - * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - * - * Version numbers are integer values representing different iterations of the Prompt. 
Each update to the Prompt increments its version number. + * @param {Hume.empathicVoice.GetPromptVersionPromptsRequest} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.prompts.getPromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", 0) + * await client.empathicVoice.prompts.getPromptVersion({ + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + * version: 0 + * }) */ public getPromptVersion( - id: string, - version: number, + request: Hume.empathicVoice.GetPromptVersionPromptsRequest, requestOptions?: Prompts.RequestOptions, - ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getPromptVersion(id, version, requestOptions)); + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__getPromptVersion(request, requestOptions)); } private async __getPromptVersion( - id: string, - version: number, + request: Hume.empathicVoice.GetPromptVersionPromptsRequest, requestOptions?: Prompts.RequestOptions, - ): Promise> { + ): Promise> { + const { id, version } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -763,32 +763,29 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting your system prompt. * - * @param {string} id - Identifier for a Prompt. Formatted as a UUID. - * @param {number} version - Version number for a Prompt. - * - * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - * - * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. 
+ * @param {Hume.empathicVoice.DeletePromptVersionPromptsRequest} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.prompts.deletePromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", 1) + * await client.empathicVoice.prompts.deletePromptVersion({ + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + * version: 1 + * }) */ public deletePromptVersion( - id: string, - version: number, + request: Hume.empathicVoice.DeletePromptVersionPromptsRequest, requestOptions?: Prompts.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__deletePromptVersion(id, version, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__deletePromptVersion(request, requestOptions)); } private async __deletePromptVersion( - id: string, - version: number, + request: Hume.empathicVoice.DeletePromptVersionPromptsRequest, requestOptions?: Prompts.RequestOptions, ): Promise> { + const { id, version } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -859,39 +856,30 @@ export class Prompts { * * See our [prompting guide](/docs/speech-to-speech-evi/guides/phone-calling) for tips on crafting your system prompt. * - * @param {string} id - Identifier for a Prompt. Formatted as a UUID. - * @param {number} version - Version number for a Prompt. - * - * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - * - * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. 
* @param {Hume.empathicVoice.PostedPromptVersionDescription} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.prompts.updatePromptDescription("af699d45-2985-42cc-91b9-af9e5da3bac5", 1, { + * await client.empathicVoice.prompts.updatePromptDescription({ + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + * version: 1, * versionDescription: "This is an updated version_description." * }) */ public updatePromptDescription( - id: string, - version: number, - request: Hume.empathicVoice.PostedPromptVersionDescription = {}, + request: Hume.empathicVoice.PostedPromptVersionDescription, requestOptions?: Prompts.RequestOptions, - ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise( - this.__updatePromptDescription(id, version, request, requestOptions), - ); + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__updatePromptDescription(request, requestOptions)); } private async __updatePromptDescription( - id: string, - version: number, - request: Hume.empathicVoice.PostedPromptVersionDescription = {}, + request: Hume.empathicVoice.PostedPromptVersionDescription, requestOptions?: Prompts.RequestOptions, - ): Promise> { + ): Promise> { + const { id, version, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -908,7 +896,7 @@ export class Prompts { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedPromptVersionDescription.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedPromptVersionDescription.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), diff --git 
a/src/api/resources/empathicVoice/resources/prompts/client/requests/DeletePromptPromptsRequest.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/DeletePromptPromptsRequest.ts new file mode 100644 index 00000000..6704ec43 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/DeletePromptPromptsRequest.ts @@ -0,0 +1,12 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5" + * } + */ +export interface DeletePromptPromptsRequest { + /** Identifier for a Prompt. Formatted as a UUID. */ + id: string; +} diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/DeletePromptVersionPromptsRequest.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/DeletePromptVersionPromptsRequest.ts new file mode 100644 index 00000000..d2fe7e1b --- /dev/null +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/DeletePromptVersionPromptsRequest.ts @@ -0,0 +1,21 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + * version: 1 + * } + */ +export interface DeletePromptVersionPromptsRequest { + /** Identifier for a Prompt. Formatted as a UUID. */ + id: string; + /** + * Version number for a Prompt. + * + * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. 
+ */ + version: number; +} diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/GetPromptVersionPromptsRequest.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/GetPromptVersionPromptsRequest.ts new file mode 100644 index 00000000..546e0fd3 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/GetPromptVersionPromptsRequest.ts @@ -0,0 +1,21 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + * version: 0 + * } + */ +export interface GetPromptVersionPromptsRequest { + /** Identifier for a Prompt. Formatted as a UUID. */ + id: string; + /** + * Version number for a Prompt. + * + * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. 
+ */ + version: number; +} diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/PromptsListPromptVersionsRequest.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/ListPromptVersionsPromptsRequest.ts similarity index 83% rename from src/api/resources/empathicVoice/resources/prompts/client/requests/PromptsListPromptVersionsRequest.ts rename to src/api/resources/empathicVoice/resources/prompts/client/requests/ListPromptVersionsPromptsRequest.ts index bdaad4a6..bd3f837e 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/requests/PromptsListPromptVersionsRequest.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/ListPromptVersionsPromptsRequest.ts @@ -2,9 +2,13 @@ /** * @example - * {} + * { + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5" + * } */ -export interface PromptsListPromptVersionsRequest { +export interface ListPromptVersionsPromptsRequest { + /** Identifier for a Prompt. Formatted as a UUID. */ + id: string; /** * Specifies the page number to retrieve, enabling pagination. * diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/PromptsListPromptsRequest.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/ListPromptsPromptsRequest.ts similarity index 95% rename from src/api/resources/empathicVoice/resources/prompts/client/requests/PromptsListPromptsRequest.ts rename to src/api/resources/empathicVoice/resources/prompts/client/requests/ListPromptsPromptsRequest.ts index 77c501f4..c9af4f3c 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/requests/PromptsListPromptsRequest.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/ListPromptsPromptsRequest.ts @@ -7,7 +7,7 @@ * pageSize: 2 * } */ -export interface PromptsListPromptsRequest { +export interface ListPromptsPromptsRequest { /** * Specifies the page number to retrieve, enabling pagination. 
* diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts index d0562a8c..fd785b1f 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts @@ -19,5 +19,5 @@ export interface PostedPrompt { */ text: string; /** An optional description of the Prompt version. */ - versionDescription?: string; + versionDescription?: string | null; } diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts index 8912fc52..dca64be1 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts @@ -3,10 +3,13 @@ /** * @example * { + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * name: "Updated Weather Assistant Prompt Name" * } */ export interface PostedPromptName { + /** Identifier for a Prompt. Formatted as a UUID. */ + id: string; /** Name applied to all versions of a particular Prompt. */ name: string; } diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts index 34684664..4ea229e1 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts @@ -3,11 +3,14 @@ /** * @example * { + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * text: "You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. 
Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", * versionDescription: "This is an updated version of the Weather Assistant Prompt." * } */ export interface PostedPromptVersion { + /** Identifier for a Prompt. Formatted as a UUID. */ + id: string; /** * Instructions used to shape EVI's behavior, responses, and style for this version of the Prompt. * @@ -17,5 +20,5 @@ export interface PostedPromptVersion { */ text: string; /** An optional description of the Prompt version. */ - versionDescription?: string; + versionDescription?: string | null; } diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts index 6af15747..5d1e5d7a 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts @@ -3,10 +3,22 @@ /** * @example * { + * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + * version: 1, * versionDescription: "This is an updated version_description." * } */ export interface PostedPromptVersionDescription { + /** Identifier for a Prompt. Formatted as a UUID. */ + id: string; + /** + * Version number for a Prompt. + * + * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. + */ + version: number; /** An optional description of the Prompt version. 
*/ - versionDescription?: string; + versionDescription?: string | null; } diff --git a/src/api/resources/empathicVoice/resources/prompts/client/requests/index.ts b/src/api/resources/empathicVoice/resources/prompts/client/requests/index.ts index 904a263e..4dc411ef 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/requests/index.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/requests/index.ts @@ -1,6 +1,9 @@ +export type { DeletePromptPromptsRequest } from "./DeletePromptPromptsRequest.js"; +export type { DeletePromptVersionPromptsRequest } from "./DeletePromptVersionPromptsRequest.js"; +export type { GetPromptVersionPromptsRequest } from "./GetPromptVersionPromptsRequest.js"; +export type { ListPromptsPromptsRequest } from "./ListPromptsPromptsRequest.js"; +export type { ListPromptVersionsPromptsRequest } from "./ListPromptVersionsPromptsRequest.js"; export type { PostedPrompt } from "./PostedPrompt.js"; export type { PostedPromptName } from "./PostedPromptName.js"; export type { PostedPromptVersion } from "./PostedPromptVersion.js"; export type { PostedPromptVersionDescription } from "./PostedPromptVersionDescription.js"; -export type { PromptsListPromptsRequest } from "./PromptsListPromptsRequest.js"; -export type { PromptsListPromptVersionsRequest } from "./PromptsListPromptVersionsRequest.js"; diff --git a/src/api/resources/empathicVoice/resources/tools/client/Client.ts b/src/api/resources/empathicVoice/resources/tools/client/Client.ts index f01bc4b9..45039a0d 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/Client.ts @@ -26,7 +26,7 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. 
* - * @param {Hume.empathicVoice.ToolsListToolsRequest} request + * @param {Hume.empathicVoice.ListToolsToolsRequest} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} @@ -38,14 +38,14 @@ export class Tools { * }) */ public async listTools( - request: Hume.empathicVoice.ToolsListToolsRequest = {}, + request: Hume.empathicVoice.ListToolsToolsRequest = {}, requestOptions?: Tools.RequestOptions, ): Promise< - core.Page + core.Page > { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ToolsListToolsRequest, + request: Hume.empathicVoice.ListToolsToolsRequest, ): Promise> => { const { pageNumber, pageSize, restrictToMostRecent, name } = request; const _queryParams: Record = {}; @@ -135,7 +135,7 @@ export class Tools { let _offset = request?.pageNumber != null ? request?.pageNumber : 0; const dataWithRawResponse = await list(request).withRawResponse(); return new core.Page< - Hume.empathicVoice.ReturnUserDefinedTool | undefined, + Hume.empathicVoice.ReturnUserDefinedTool | null, Hume.empathicVoice.ReturnPagedUserDefinedTools >({ response: dataWithRawResponse.data, @@ -171,14 +171,14 @@ export class Tools { public createTool( request: Hume.empathicVoice.PostedUserDefinedTool, requestOptions?: Tools.RequestOptions, - ): core.HttpResponsePromise { + ): core.HttpResponsePromise { return core.HttpResponsePromise.fromPromise(this.__createTool(request, requestOptions)); } private async __createTool( request: Hume.empathicVoice.PostedUserDefinedTool, requestOptions?: Tools.RequestOptions, - ): Promise> { + ): Promise> { const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -262,27 +262,27 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive 
instructions on defining and integrating tools into EVI. * - * @param {string} id - Identifier for a Tool. Formatted as a UUID. - * @param {Hume.empathicVoice.ToolsListToolVersionsRequest} request + * @param {Hume.empathicVoice.ListToolVersionsToolsRequest} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea") + * await client.empathicVoice.tools.listToolVersions({ + * id: "00183a3f-79ba-413d-9f3b-609864268bea" + * }) */ public async listToolVersions( - id: string, - request: Hume.empathicVoice.ToolsListToolVersionsRequest = {}, + request: Hume.empathicVoice.ListToolVersionsToolsRequest, requestOptions?: Tools.RequestOptions, ): Promise< - core.Page + core.Page > { const list = core.HttpResponsePromise.interceptFunction( async ( - request: Hume.empathicVoice.ToolsListToolVersionsRequest, + request: Hume.empathicVoice.ListToolVersionsToolsRequest, ): Promise> => { - const { pageNumber, pageSize, restrictToMostRecent } = request; + const { id, pageNumber, pageSize, restrictToMostRecent } = request; const _queryParams: Record = {}; if (pageNumber != null) { _queryParams.page_number = pageNumber.toString(); @@ -367,7 +367,7 @@ export class Tools { let _offset = request?.pageNumber != null ? request?.pageNumber : 0; const dataWithRawResponse = await list(request).withRawResponse(); return new core.Page< - Hume.empathicVoice.ReturnUserDefinedTool | undefined, + Hume.empathicVoice.ReturnUserDefinedTool | null, Hume.empathicVoice.ReturnPagedUserDefinedTools >({ response: dataWithRawResponse.data, @@ -386,14 +386,14 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. * - * @param {string} id - Identifier for a Tool. Formatted as a UUID. 
* @param {Hume.empathicVoice.PostedUserDefinedToolVersion} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.tools.createToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", { + * await client.empathicVoice.tools.createToolVersion({ + * id: "00183a3f-79ba-413d-9f3b-609864268bea", * parameters: "{ \"type\": \"object\", \"properties\": { \"location\": { \"type\": \"string\", \"description\": \"The city and state, e.g. San Francisco, CA\" }, \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\", \"kelvin\"], \"description\": \"The temperature unit to use. Infer this from the users location.\" } }, \"required\": [\"location\", \"format\"] }", * versionDescription: "Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", * fallbackContent: "Unable to fetch current weather.", @@ -401,18 +401,17 @@ export class Tools { * }) */ public createToolVersion( - id: string, request: Hume.empathicVoice.PostedUserDefinedToolVersion, requestOptions?: Tools.RequestOptions, - ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__createToolVersion(id, request, requestOptions)); + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__createToolVersion(request, requestOptions)); } private async __createToolVersion( - id: string, request: Hume.empathicVoice.PostedUserDefinedToolVersion, requestOptions?: Tools.RequestOptions, - ): Promise> { + ): Promise> { + const { id, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -429,7 +428,7 @@ export class Tools { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: 
serializers.empathicVoice.PostedUserDefinedToolVersion.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedUserDefinedToolVersion.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), @@ -496,19 +495,28 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. * - * @param {string} id - Identifier for a Tool. Formatted as a UUID. + * @param {Hume.empathicVoice.DeleteToolToolsRequest} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.tools.deleteTool("00183a3f-79ba-413d-9f3b-609864268bea") + * await client.empathicVoice.tools.deleteTool({ + * id: "00183a3f-79ba-413d-9f3b-609864268bea" + * }) */ - public deleteTool(id: string, requestOptions?: Tools.RequestOptions): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__deleteTool(id, requestOptions)); + public deleteTool( + request: Hume.empathicVoice.DeleteToolToolsRequest, + requestOptions?: Tools.RequestOptions, + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__deleteTool(request, requestOptions)); } - private async __deleteTool(id: string, requestOptions?: Tools.RequestOptions): Promise> { + private async __deleteTool( + request: Hume.empathicVoice.DeleteToolToolsRequest, + requestOptions?: Tools.RequestOptions, + ): Promise> { + const { id } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -577,30 +585,29 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. 
* - * @param {string} id - Identifier for a Tool. Formatted as a UUID. * @param {Hume.empathicVoice.PostedUserDefinedToolName} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.tools.updateToolName("00183a3f-79ba-413d-9f3b-609864268bea", { + * await client.empathicVoice.tools.updateToolName({ + * id: "00183a3f-79ba-413d-9f3b-609864268bea", * name: "get_current_temperature" * }) */ public updateToolName( - id: string, request: Hume.empathicVoice.PostedUserDefinedToolName, requestOptions?: Tools.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__updateToolName(id, request, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__updateToolName(request, requestOptions)); } private async __updateToolName( - id: string, request: Hume.empathicVoice.PostedUserDefinedToolName, requestOptions?: Tools.RequestOptions, ): Promise> { + const { id, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -617,7 +624,7 @@ export class Tools { contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedUserDefinedToolName.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedUserDefinedToolName.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), @@ -676,32 +683,29 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. * - * @param {string} id - Identifier for a Tool. Formatted as a UUID. - * @param {number} version - Version number for a Tool. 
- * - * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - * - * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. + * @param {Hume.empathicVoice.GetToolVersionToolsRequest} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.tools.getToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1) + * await client.empathicVoice.tools.getToolVersion({ + * id: "00183a3f-79ba-413d-9f3b-609864268bea", + * version: 1 + * }) */ public getToolVersion( - id: string, - version: number, + request: Hume.empathicVoice.GetToolVersionToolsRequest, requestOptions?: Tools.RequestOptions, - ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getToolVersion(id, version, requestOptions)); + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__getToolVersion(request, requestOptions)); } private async __getToolVersion( - id: string, - version: number, + request: Hume.empathicVoice.GetToolVersionToolsRequest, requestOptions?: Tools.RequestOptions, - ): Promise> { + ): Promise> { + const { id, version } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -781,32 +785,29 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. * - * @param {string} id - Identifier for a Tool. Formatted as a UUID. - * @param {number} version - Version number for a Tool. 
- * - * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - * - * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. + * @param {Hume.empathicVoice.DeleteToolVersionToolsRequest} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.tools.deleteToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1) + * await client.empathicVoice.tools.deleteToolVersion({ + * id: "00183a3f-79ba-413d-9f3b-609864268bea", + * version: 1 + * }) */ public deleteToolVersion( - id: string, - version: number, + request: Hume.empathicVoice.DeleteToolVersionToolsRequest, requestOptions?: Tools.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__deleteToolVersion(id, version, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__deleteToolVersion(request, requestOptions)); } private async __deleteToolVersion( - id: string, - version: number, + request: Hume.empathicVoice.DeleteToolVersionToolsRequest, requestOptions?: Tools.RequestOptions, ): Promise> { + const { id, version } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -877,37 +878,30 @@ export class Tools { * * Refer to our [tool use](/docs/speech-to-speech-evi/features/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. * - * @param {string} id - Identifier for a Tool. Formatted as a UUID. - * @param {number} version - Version number for a Tool. - * - * Tools, Configs, Custom Voices, and Prompts are versioned. 
This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - * - * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. * @param {Hume.empathicVoice.PostedUserDefinedToolVersionDescription} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.empathicVoice.BadRequestError} * * @example - * await client.empathicVoice.tools.updateToolDescription("00183a3f-79ba-413d-9f3b-609864268bea", 1, { + * await client.empathicVoice.tools.updateToolDescription({ + * id: "00183a3f-79ba-413d-9f3b-609864268bea", + * version: 1, * versionDescription: "Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region." * }) */ public updateToolDescription( - id: string, - version: number, - request: Hume.empathicVoice.PostedUserDefinedToolVersionDescription = {}, + request: Hume.empathicVoice.PostedUserDefinedToolVersionDescription, requestOptions?: Tools.RequestOptions, - ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__updateToolDescription(id, version, request, requestOptions)); + ): core.HttpResponsePromise { + return core.HttpResponsePromise.fromPromise(this.__updateToolDescription(request, requestOptions)); } private async __updateToolDescription( - id: string, - version: number, - request: Hume.empathicVoice.PostedUserDefinedToolVersionDescription = {}, + request: Hume.empathicVoice.PostedUserDefinedToolVersionDescription, requestOptions?: Tools.RequestOptions, - ): Promise> { + ): Promise> { + const { id, version, ..._body } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -924,7 +918,7 @@ export class Tools 
{ contentType: "application/json", queryParameters: requestOptions?.queryParams, requestType: "json", - body: serializers.empathicVoice.PostedUserDefinedToolVersionDescription.jsonOrThrow(request, { + body: serializers.empathicVoice.PostedUserDefinedToolVersionDescription.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip", omitUndefined: true, }), diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/DeleteToolToolsRequest.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/DeleteToolToolsRequest.ts new file mode 100644 index 00000000..5e0775c1 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/DeleteToolToolsRequest.ts @@ -0,0 +1,12 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "00183a3f-79ba-413d-9f3b-609864268bea" + * } + */ +export interface DeleteToolToolsRequest { + /** Identifier for a Tool. Formatted as a UUID. */ + id: string; +} diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/DeleteToolVersionToolsRequest.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/DeleteToolVersionToolsRequest.ts new file mode 100644 index 00000000..72578954 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/DeleteToolVersionToolsRequest.ts @@ -0,0 +1,21 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "00183a3f-79ba-413d-9f3b-609864268bea", + * version: 1 + * } + */ +export interface DeleteToolVersionToolsRequest { + /** Identifier for a Tool. Formatted as a UUID. */ + id: string; + /** + * Version number for a Tool. + * + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Tool. 
Each update to the Tool increments its version number. + */ + version: number; +} diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/GetToolVersionToolsRequest.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/GetToolVersionToolsRequest.ts new file mode 100644 index 00000000..345d8396 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/GetToolVersionToolsRequest.ts @@ -0,0 +1,21 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "00183a3f-79ba-413d-9f3b-609864268bea", + * version: 1 + * } + */ +export interface GetToolVersionToolsRequest { + /** Identifier for a Tool. Formatted as a UUID. */ + id: string; + /** + * Version number for a Tool. + * + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. 
+ */ + version: number; +} diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/ToolsListToolVersionsRequest.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/ListToolVersionsToolsRequest.ts similarity index 84% rename from src/api/resources/empathicVoice/resources/tools/client/requests/ToolsListToolVersionsRequest.ts rename to src/api/resources/empathicVoice/resources/tools/client/requests/ListToolVersionsToolsRequest.ts index dcff3a11..4de3a5e2 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/requests/ToolsListToolVersionsRequest.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/ListToolVersionsToolsRequest.ts @@ -2,9 +2,13 @@ /** * @example - * {} + * { + * id: "00183a3f-79ba-413d-9f3b-609864268bea" + * } */ -export interface ToolsListToolVersionsRequest { +export interface ListToolVersionsToolsRequest { + /** Identifier for a Tool. Formatted as a UUID. */ + id: string; /** * Specifies the page number to retrieve, enabling pagination. * diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/ToolsListToolsRequest.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/ListToolsToolsRequest.ts similarity index 96% rename from src/api/resources/empathicVoice/resources/tools/client/requests/ToolsListToolsRequest.ts rename to src/api/resources/empathicVoice/resources/tools/client/requests/ListToolsToolsRequest.ts index 15369f90..f4193a99 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/requests/ToolsListToolsRequest.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/ListToolsToolsRequest.ts @@ -7,7 +7,7 @@ * pageSize: 2 * } */ -export interface ToolsListToolsRequest { +export interface ListToolsToolsRequest { /** * Specifies the page number to retrieve, enabling pagination. 
* diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts index fc4d0739..5fba1de4 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts @@ -12,9 +12,9 @@ */ export interface PostedUserDefinedTool { /** An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. */ - description?: string; + description?: string | null; /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */ - fallbackContent?: string; + fallbackContent?: string | null; /** Name applied to all versions of a particular Tool. */ name: string; /** @@ -24,5 +24,5 @@ export interface PostedUserDefinedTool { */ parameters: string; /** An optional description of the Tool version. */ - versionDescription?: string; + versionDescription?: string | null; } diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts index 589576a5..d2215f97 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts @@ -3,10 +3,13 @@ /** * @example * { + * id: "00183a3f-79ba-413d-9f3b-609864268bea", * name: "get_current_temperature" * } */ export interface PostedUserDefinedToolName { + /** Identifier for a Tool. Formatted as a UUID. */ + id: string; /** Name applied to all versions of a particular Tool. 
*/ name: string; } diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts index 13d5877c..b7c82b33 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts @@ -3,6 +3,7 @@ /** * @example * { + * id: "00183a3f-79ba-413d-9f3b-609864268bea", * parameters: "{ \"type\": \"object\", \"properties\": { \"location\": { \"type\": \"string\", \"description\": \"The city and state, e.g. San Francisco, CA\" }, \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\", \"kelvin\"], \"description\": \"The temperature unit to use. Infer this from the users location.\" } }, \"required\": [\"location\", \"format\"] }", * versionDescription: "Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", * fallbackContent: "Unable to fetch current weather.", @@ -10,10 +11,12 @@ * } */ export interface PostedUserDefinedToolVersion { + /** Identifier for a Tool. Formatted as a UUID. */ + id: string; /** An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. */ - description?: string; + description?: string | null; /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */ - fallbackContent?: string; + fallbackContent?: string | null; /** * Stringified JSON defining the parameters used by this version of the Tool. * @@ -21,5 +24,5 @@ export interface PostedUserDefinedToolVersion { */ parameters: string; /** An optional description of the Tool version. 
*/ - versionDescription?: string; + versionDescription?: string | null; } diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts index 4b436e47..10bbfbdc 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts @@ -3,10 +3,22 @@ /** * @example * { + * id: "00183a3f-79ba-413d-9f3b-609864268bea", + * version: 1, * versionDescription: "Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region." * } */ export interface PostedUserDefinedToolVersionDescription { + /** Identifier for a Tool. Formatted as a UUID. */ + id: string; + /** + * Version number for a Tool. + * + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. + */ + version: number; /** An optional description of the Tool version. 
*/ - versionDescription?: string; + versionDescription?: string | null; } diff --git a/src/api/resources/empathicVoice/resources/tools/client/requests/index.ts b/src/api/resources/empathicVoice/resources/tools/client/requests/index.ts index f50258c7..76a98be9 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/requests/index.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/requests/index.ts @@ -1,6 +1,9 @@ +export type { DeleteToolToolsRequest } from "./DeleteToolToolsRequest.js"; +export type { DeleteToolVersionToolsRequest } from "./DeleteToolVersionToolsRequest.js"; +export type { GetToolVersionToolsRequest } from "./GetToolVersionToolsRequest.js"; +export type { ListToolsToolsRequest } from "./ListToolsToolsRequest.js"; +export type { ListToolVersionsToolsRequest } from "./ListToolVersionsToolsRequest.js"; export type { PostedUserDefinedTool } from "./PostedUserDefinedTool.js"; export type { PostedUserDefinedToolName } from "./PostedUserDefinedToolName.js"; export type { PostedUserDefinedToolVersion } from "./PostedUserDefinedToolVersion.js"; export type { PostedUserDefinedToolVersionDescription } from "./PostedUserDefinedToolVersionDescription.js"; -export type { ToolsListToolsRequest } from "./ToolsListToolsRequest.js"; -export type { ToolsListToolVersionsRequest } from "./ToolsListToolVersionsRequest.js"; diff --git a/src/api/resources/empathicVoice/types/AssistantEnd.ts b/src/api/resources/empathicVoice/types/AssistantEnd.ts index 174c3a22..af0cf206 100644 --- a/src/api/resources/empathicVoice/types/AssistantEnd.ts +++ b/src/api/resources/empathicVoice/types/AssistantEnd.ts @@ -5,7 +5,7 @@ */ export interface AssistantEnd { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** * The type of message sent through the socket; for an Assistant End message, this must be `assistant_end`. 
* diff --git a/src/api/resources/empathicVoice/types/AssistantInput.ts b/src/api/resources/empathicVoice/types/AssistantInput.ts index d46b52ab..caba2ccb 100644 --- a/src/api/resources/empathicVoice/types/AssistantInput.ts +++ b/src/api/resources/empathicVoice/types/AssistantInput.ts @@ -1,5 +1,7 @@ // This file was auto-generated by Fern from our API Definition. +import type * as Hume from "../../../index.js"; + /** * **Assistant text to synthesize into spoken audio and insert into the conversation.** EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. * @@ -7,7 +9,7 @@ */ export interface AssistantInput { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** * Assistant text to synthesize into spoken audio and insert into the conversation. * @@ -15,5 +17,5 @@ export interface AssistantInput { */ text: string; /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */ - type: "assistant_input"; + type: Hume.empathicVoice.AssistantInputType; } diff --git a/src/api/resources/empathicVoice/types/AssistantInputType.ts b/src/api/resources/empathicVoice/types/AssistantInputType.ts new file mode 100644 index 00000000..e6e753d7 --- /dev/null +++ b/src/api/resources/empathicVoice/types/AssistantInputType.ts @@ -0,0 +1,7 @@ +// This file was auto-generated by Fern from our API Definition. + +/** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. 
*/ +export const AssistantInputType = { + AssistantInput: "assistant_input", +} as const; +export type AssistantInputType = (typeof AssistantInputType)[keyof typeof AssistantInputType]; diff --git a/src/api/resources/empathicVoice/types/AssistantMessage.ts b/src/api/resources/empathicVoice/types/AssistantMessage.ts index ff7b6b06..73ccafdf 100644 --- a/src/api/resources/empathicVoice/types/AssistantMessage.ts +++ b/src/api/resources/empathicVoice/types/AssistantMessage.ts @@ -7,15 +7,13 @@ import type * as Hume from "../../../index.js"; */ export interface AssistantMessage { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/speech-to-speech-evi/chat#send.AssistantInput.text). */ fromText: boolean; /** ID of the assistant message. Allows the Assistant Message to be tracked and referenced. */ id?: string; - /** Indicates if this message is a quick response or not. */ - isQuickResponse: boolean; /** Detected language of the message text. */ - language?: string; + language?: string | null; /** Transcript of the message. */ message: Hume.empathicVoice.ChatMessage; /** Inference model results. */ diff --git a/src/api/resources/empathicVoice/types/AssistantProsody.ts b/src/api/resources/empathicVoice/types/AssistantProsody.ts index 45d4dfb9..2588e79b 100644 --- a/src/api/resources/empathicVoice/types/AssistantProsody.ts +++ b/src/api/resources/empathicVoice/types/AssistantProsody.ts @@ -7,7 +7,7 @@ import type * as Hume from "../../../index.js"; */ export interface AssistantProsody { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** Unique identifier for the segment. 
*/ id?: string; /** Inference model results. */ @@ -17,5 +17,5 @@ * * This message the expression measurement predictions of the assistant's audio output. */ - type: "assistant_prosody"; + type: Hume.empathicVoice.AssistantProsodyType; } diff --git a/src/api/resources/empathicVoice/types/AssistantProsodyType.ts b/src/api/resources/empathicVoice/types/AssistantProsodyType.ts new file mode 100644 index 00000000..c86e68a7 --- /dev/null +++ b/src/api/resources/empathicVoice/types/AssistantProsodyType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for an Assistant Prosody message, this must be `assistant_prosody`. + * + * This message contains the expression measurement predictions of the assistant's audio output. + */ +export const AssistantProsodyType = { + AssistantProsody: "assistant_prosody", +} as const; +export type AssistantProsodyType = (typeof AssistantProsodyType)[keyof typeof AssistantProsodyType]; diff --git a/src/api/resources/empathicVoice/types/AudioConfiguration.ts b/src/api/resources/empathicVoice/types/AudioConfiguration.ts index 6c7991df..20b63167 100644 --- a/src/api/resources/empathicVoice/types/AudioConfiguration.ts +++ b/src/api/resources/empathicVoice/types/AudioConfiguration.ts @@ -6,7 +6,7 @@ export interface AudioConfiguration { /** Number of audio channels. */ channels: number; /** Optional codec information. */ - codec?: string; + codec?: string | null; /** Encoding format of the audio input, such as `linear16`. */ encoding: Hume.empathicVoice.Encoding; /** Audio sample rate. Number of samples per second in the audio input, measured in Hertz. 
*/ diff --git a/src/api/resources/empathicVoice/types/AudioInput.ts b/src/api/resources/empathicVoice/types/AudioInput.ts index 5f8b5bac..67a1d76b 100644 --- a/src/api/resources/empathicVoice/types/AudioInput.ts +++ b/src/api/resources/empathicVoice/types/AudioInput.ts @@ -7,7 +7,7 @@ */ export interface AudioInput { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** * Base64 encoded audio input to insert into the conversation. * diff --git a/src/api/resources/empathicVoice/types/AudioOutput.ts b/src/api/resources/empathicVoice/types/AudioOutput.ts index 5d944081..b77161a1 100644 --- a/src/api/resources/empathicVoice/types/AudioOutput.ts +++ b/src/api/resources/empathicVoice/types/AudioOutput.ts @@ -7,7 +7,7 @@ */ export interface AudioOutput { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */ data: string; /** ID of the audio output. Allows the Audio Output message to be tracked and referenced. */ diff --git a/src/api/resources/empathicVoice/types/BuiltinToolConfig.ts b/src/api/resources/empathicVoice/types/BuiltinToolConfig.ts index 3dfabc32..7102032a 100644 --- a/src/api/resources/empathicVoice/types/BuiltinToolConfig.ts +++ b/src/api/resources/empathicVoice/types/BuiltinToolConfig.ts @@ -4,6 +4,6 @@ import type * as Hume from "../../../index.js"; export interface BuiltinToolConfig { /** Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. 
*/ - fallbackContent?: string; + fallbackContent?: string | null; name: Hume.empathicVoice.BuiltInTool; } diff --git a/src/api/resources/empathicVoice/types/ChatMessage.ts b/src/api/resources/empathicVoice/types/ChatMessage.ts index 9282b664..f0226840 100644 --- a/src/api/resources/empathicVoice/types/ChatMessage.ts +++ b/src/api/resources/empathicVoice/types/ChatMessage.ts @@ -4,11 +4,11 @@ import type * as Hume from "../../../index.js"; export interface ChatMessage { /** Transcript of the message. */ - content?: string; + content?: string | null; /** Role of who is providing the message. */ role: Hume.empathicVoice.Role; /** Function call name and arguments. */ - toolCall?: Hume.empathicVoice.ToolCallMessage; + toolCall?: Hume.empathicVoice.ToolCallMessage | null; /** Function call response from client. */ - toolResult?: Hume.empathicVoice.ChatMessageToolResult; + toolResult?: Hume.empathicVoice.ChatMessageToolResult | null; } diff --git a/src/api/resources/empathicVoice/types/ChatMetadata.ts b/src/api/resources/empathicVoice/types/ChatMetadata.ts index dad14608..50e9b021 100644 --- a/src/api/resources/empathicVoice/types/ChatMetadata.ts +++ b/src/api/resources/empathicVoice/types/ChatMetadata.ts @@ -1,5 +1,7 @@ // This file was auto-generated by Fern from our API Definition. +import type * as Hume from "../../../index.js"; + /** * **The first message received after establishing a connection with EVI**, containing important identifiers for the current Chat session. * @@ -17,13 +19,13 @@ export interface ChatMetadata { /** ID of the Chat session. Allows the Chat session to be tracked and referenced. */ chatId: string; /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** ID of the initiating request. 
*/ - requestId?: string; + requestId: string | null; /** * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`. * * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session. */ - type: "chat_metadata"; + type: Hume.empathicVoice.ChatMetadataType; } diff --git a/src/api/resources/empathicVoice/types/ChatMetadataType.ts b/src/api/resources/empathicVoice/types/ChatMetadataType.ts new file mode 100644 index 00000000..2cd15a6b --- /dev/null +++ b/src/api/resources/empathicVoice/types/ChatMetadataType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`. + * + * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session. + */ +export const ChatMetadataType = { + ChatMetadata: "chat_metadata", +} as const; +export type ChatMetadataType = (typeof ChatMetadataType)[keyof typeof ChatMetadataType]; diff --git a/src/api/resources/empathicVoice/types/ConnectSessionSettings.ts b/src/api/resources/empathicVoice/types/ConnectSessionSettings.ts index 4e1f833a..d03447ea 100644 --- a/src/api/resources/empathicVoice/types/ConnectSessionSettings.ts +++ b/src/api/resources/empathicVoice/types/ConnectSessionSettings.ts @@ -37,16 +37,14 @@ export interface ConnectSessionSettings { * For help writing a system prompt, see our [Prompting Guide](/docs/speech-to-speech-evi/guides/prompting). */ systemPrompt?: string; - /** The name or ID of the voice from the `Voice Library` to be used as the speaker for this EVI session. This will override the speaker set in the selected configuration. 
*/ - voiceId?: string; /** * This field allows you to assign values to dynamic variables referenced in your system prompt. * * Each key represents the variable name, and the corresponding value is the specific content you wish to assign to that variable within the session. While the values for variables can be strings, numbers, or booleans, the value will ultimately be converted to a string when injected into your system prompt. * - * When used in query parameters, specify each variable using bracket notation: `session_settings[variables][key]=value`. For example: `session_settings[variables][name]=John&session_settings[variables][age]=30`. - * * Using this field, you can personalize responses based on session-specific details. For more guidance, see our [guide on using dynamic variables](/docs/speech-to-speech-evi/features/dynamic-variables). */ variables?: Record; + /** The name or ID of the voice from the `Voice Library` to be used as the speaker for this EVI session. This will override the speaker set in the selected configuration. */ + voiceId?: string; } diff --git a/src/api/resources/empathicVoice/types/Encoding.ts b/src/api/resources/empathicVoice/types/Encoding.ts index 3d12003c..7f2ad199 100644 --- a/src/api/resources/empathicVoice/types/Encoding.ts +++ b/src/api/resources/empathicVoice/types/Encoding.ts @@ -1,3 +1,6 @@ // This file was auto-generated by Fern from our API Definition. -export type Encoding = "linear16"; +export const Encoding = { + Linear16: "linear16", +} as const; +export type Encoding = (typeof Encoding)[keyof typeof Encoding]; diff --git a/src/api/resources/empathicVoice/types/ErrorLevel.ts b/src/api/resources/empathicVoice/types/ErrorLevel.ts index 96f899fa..7aad49ee 100644 --- a/src/api/resources/empathicVoice/types/ErrorLevel.ts +++ b/src/api/resources/empathicVoice/types/ErrorLevel.ts @@ -1,3 +1,6 @@ // This file was auto-generated by Fern from our API Definition. 
-export type ErrorLevel = "warn"; +export const ErrorLevel = { + Warn: "warn", +} as const; +export type ErrorLevel = (typeof ErrorLevel)[keyof typeof ErrorLevel]; diff --git a/src/api/resources/empathicVoice/types/ErrorType.ts b/src/api/resources/empathicVoice/types/ErrorType.ts new file mode 100644 index 00000000..f897ffc6 --- /dev/null +++ b/src/api/resources/empathicVoice/types/ErrorType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for a Web Socket Error message, this must be `error`. + * + * This message indicates a disruption in the WebSocket connection, such as an unexpected disconnection, protocol error, or data transmission issue. + */ +export const ErrorType = { + Error: "error", +} as const; +export type ErrorType = (typeof ErrorType)[keyof typeof ErrorType]; diff --git a/src/api/resources/empathicVoice/types/Inference.ts b/src/api/resources/empathicVoice/types/Inference.ts index dcf4eb28..6a518f6f 100644 --- a/src/api/resources/empathicVoice/types/Inference.ts +++ b/src/api/resources/empathicVoice/types/Inference.ts @@ -8,5 +8,5 @@ export interface Inference { * * EVI uses the prosody model to measure 48 emotions related to speech and vocal characteristics within a given expression. */ - prosody?: Hume.empathicVoice.ProsodyInference; + prosody: Hume.empathicVoice.ProsodyInference | null; } diff --git a/src/api/resources/empathicVoice/types/JsonMessage.ts b/src/api/resources/empathicVoice/types/JsonMessage.ts index 74e7c506..0e9b2fea 100644 --- a/src/api/resources/empathicVoice/types/JsonMessage.ts +++ b/src/api/resources/empathicVoice/types/JsonMessage.ts @@ -46,9 +46,4 @@ export type JsonMessage = * **Error message from the tool call**, not exposed to the LLM or user. Upon receiving a Tool Call message and failing to invoke the function, this message is sent to notify EVI of the tool's failure. 
* * For built-in tools implemented on the server, you will receive this message type rather than a `ToolCallMessage` if the tool fails. See our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for further details. */ - | Hume.empathicVoice.ToolErrorMessage - /** - * **Settings for this chat session.** Session settings are temporary and apply only to the current Chat session. - * - * These settings can be adjusted dynamically based on the requirements of each session to ensure optimal performance and user experience. See our [Session Settings Guide](/docs/speech-to-speech-evi/configuration/session-settings) for a complete list of configurable settings. */ - | Hume.empathicVoice.SessionSettings; + | Hume.empathicVoice.ToolErrorMessage; diff --git a/src/api/resources/empathicVoice/types/PauseAssistantMessage.ts b/src/api/resources/empathicVoice/types/PauseAssistantMessage.ts index 8b59f3ef..2fe0dc0a 100644 --- a/src/api/resources/empathicVoice/types/PauseAssistantMessage.ts +++ b/src/api/resources/empathicVoice/types/PauseAssistantMessage.ts @@ -1,5 +1,7 @@ // This file was auto-generated by Fern from our API Definition. +import type * as Hume from "../../../index.js"; + /** * **Pause responses from EVI.** Chat history is still saved and sent after resuming. Once this message is sent, EVI will not respond until a Resume Assistant message is sent. * @@ -7,11 +9,11 @@ */ export interface PauseAssistantMessage { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** * The type of message sent through the socket; must be `pause_assistant_message` for our server to correctly identify and process it as a Pause Assistant message. * * Once this message is sent, EVI will not respond until a [Resume Assistant message](/reference/speech-to-speech-evi/chat#send.ResumeAssistantMessage) is sent. 
When paused, EVI won't respond, but transcriptions of your audio inputs will still be recorded. */ - type: "pause_assistant_message"; + type: Hume.empathicVoice.PauseAssistantMessageType; } diff --git a/src/api/resources/empathicVoice/types/PauseAssistantMessageType.ts b/src/api/resources/empathicVoice/types/PauseAssistantMessageType.ts new file mode 100644 index 00000000..eee7bee6 --- /dev/null +++ b/src/api/resources/empathicVoice/types/PauseAssistantMessageType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; must be `pause_assistant_message` for our server to correctly identify and process it as a Pause Assistant message. + * + * Once this message is sent, EVI will not respond until a [Resume Assistant message](/reference/speech-to-speech-evi/chat#send.ResumeAssistantMessage) is sent. When paused, EVI won't respond, but transcriptions of your audio inputs will still be recorded. + */ +export const PauseAssistantMessageType = { + PauseAssistantMessage: "pause_assistant_message", +} as const; +export type PauseAssistantMessageType = (typeof PauseAssistantMessageType)[keyof typeof PauseAssistantMessageType]; diff --git a/src/api/resources/empathicVoice/types/PostedBuiltinTool.ts b/src/api/resources/empathicVoice/types/PostedBuiltinTool.ts index 8cf8b685..368db4db 100644 --- a/src/api/resources/empathicVoice/types/PostedBuiltinTool.ts +++ b/src/api/resources/empathicVoice/types/PostedBuiltinTool.ts @@ -7,7 +7,7 @@ import type * as Hume from "../../../index.js"; */ export interface PostedBuiltinTool { /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */ - fallbackContent?: string; + fallbackContent?: string | null; /** * Name of the built-in tool to use. 
Hume supports the following built-in tools: * diff --git a/src/api/resources/empathicVoice/types/PostedConfigPromptSpec.ts b/src/api/resources/empathicVoice/types/PostedConfigPromptSpec.ts index 4e2aa02a..420f48cb 100644 --- a/src/api/resources/empathicVoice/types/PostedConfigPromptSpec.ts +++ b/src/api/resources/empathicVoice/types/PostedConfigPromptSpec.ts @@ -5,9 +5,9 @@ */ export interface PostedConfigPromptSpec { /** Identifier for a Prompt. Formatted as a UUID. */ - id?: string; + id?: string | null; /** Text used to create a new prompt for a particular config. */ - text?: string; + text?: string | null; /** Version number for a Prompt. Version numbers should be integers. The combination of configId and version number is unique. */ - version?: number; + version?: number | null; } diff --git a/src/api/resources/empathicVoice/types/PostedEllmModel.ts b/src/api/resources/empathicVoice/types/PostedEllmModel.ts index a615f315..baeee0b1 100644 --- a/src/api/resources/empathicVoice/types/PostedEllmModel.ts +++ b/src/api/resources/empathicVoice/types/PostedEllmModel.ts @@ -9,5 +9,5 @@ export interface PostedEllmModel { * * If omitted, short responses from the eLLM are enabled by default. */ - allowShortResponses?: boolean; + allowShortResponses?: boolean | null; } diff --git a/src/api/resources/empathicVoice/types/PostedEventMessageSpec.ts b/src/api/resources/empathicVoice/types/PostedEventMessageSpec.ts index 544bc02a..bd0efa8d 100644 --- a/src/api/resources/empathicVoice/types/PostedEventMessageSpec.ts +++ b/src/api/resources/empathicVoice/types/PostedEventMessageSpec.ts @@ -11,5 +11,5 @@ export interface PostedEventMessageSpec { */ enabled: boolean; /** Text to use as the event message when the corresponding event occurs. If no text is specified, EVI will generate an appropriate message based on its current context and the system prompt. 
*/ - text?: string; + text?: string | null; } diff --git a/src/api/resources/empathicVoice/types/PostedEventMessageSpecs.ts b/src/api/resources/empathicVoice/types/PostedEventMessageSpecs.ts index 3e9e83ad..2206b18f 100644 --- a/src/api/resources/empathicVoice/types/PostedEventMessageSpecs.ts +++ b/src/api/resources/empathicVoice/types/PostedEventMessageSpecs.ts @@ -17,9 +17,9 @@ export interface PostedEventMessageSpecs { * * However, if the inactivity message is not enabled, then reaching the inactivity timeout will immediately end the connection. */ - onInactivityTimeout?: Hume.empathicVoice.PostedEventMessageSpec; + onInactivityTimeout?: Hume.empathicVoice.PostedEventMessageSpec | null; /** Specifies the message EVI provides when the chat is disconnected due to reaching the maximum chat duration, such as a message mentioning the time limit for the chat has been reached. */ - onMaxDurationTimeout?: Hume.empathicVoice.PostedEventMessageSpec; + onMaxDurationTimeout?: Hume.empathicVoice.PostedEventMessageSpec | null; /** Specifies the initial message EVI provides when a new chat is started, such as a greeting or welcome message. */ - onNewChat?: Hume.empathicVoice.PostedEventMessageSpec; + onNewChat?: Hume.empathicVoice.PostedEventMessageSpec | null; } diff --git a/src/api/resources/empathicVoice/types/PostedLanguageModel.ts b/src/api/resources/empathicVoice/types/PostedLanguageModel.ts index 212a27f4..bd731dae 100644 --- a/src/api/resources/empathicVoice/types/PostedLanguageModel.ts +++ b/src/api/resources/empathicVoice/types/PostedLanguageModel.ts @@ -15,5 +15,5 @@ export interface PostedLanguageModel { * * Controls the randomness of the LLM's output, with values closer to 0 yielding focused, deterministic responses and values closer to 1 producing more creative, diverse responses. 
*/ - temperature?: number; + temperature?: number | null; } diff --git a/src/api/resources/empathicVoice/types/PostedNudgeSpec.ts b/src/api/resources/empathicVoice/types/PostedNudgeSpec.ts index 0881466a..44853197 100644 --- a/src/api/resources/empathicVoice/types/PostedNudgeSpec.ts +++ b/src/api/resources/empathicVoice/types/PostedNudgeSpec.ts @@ -5,7 +5,7 @@ */ export interface PostedNudgeSpec { /** If true, EVI will 'nudge' the user to speak after a determined interval of silence. */ - enabled?: boolean; + enabled?: boolean | null; /** The interval of inactivity (in seconds) before a nudge is triggered. */ - intervalSecs?: number; + intervalSecs?: number | null; } diff --git a/src/api/resources/empathicVoice/types/PostedTimeoutSpec.ts b/src/api/resources/empathicVoice/types/PostedTimeoutSpec.ts index dae6f42d..f9755f7d 100644 --- a/src/api/resources/empathicVoice/types/PostedTimeoutSpec.ts +++ b/src/api/resources/empathicVoice/types/PostedTimeoutSpec.ts @@ -5,7 +5,7 @@ */ export interface PostedTimeoutSpec { /** Duration in seconds for the timeout. */ - durationSecs?: number; + durationSecs?: number | null; /** Boolean indicating if this event message is enabled. */ enabled: boolean; } diff --git a/src/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts b/src/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts index bd6aa0d9..7d4fdc15 100644 --- a/src/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts +++ b/src/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts @@ -7,7 +7,7 @@ */ export interface PostedTimeoutSpecsInactivity { /** Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). */ - durationSecs?: number; + durationSecs?: number | null; /** * Boolean indicating if this timeout is enabled. 
* diff --git a/src/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts b/src/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts index 8cf9c40b..0c91aa72 100644 --- a/src/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts +++ b/src/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts @@ -7,7 +7,7 @@ */ export interface PostedTimeoutSpecsMaxDuration { /** Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). */ - durationSecs?: number; + durationSecs?: number | null; /** * Boolean indicating if this timeout is enabled. * diff --git a/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts b/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts index 658a620d..0241022a 100644 --- a/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts +++ b/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts @@ -13,5 +13,5 @@ export interface PostedUserDefinedToolSpec { * * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. */ - version?: number; + version?: number | null; } diff --git a/src/api/resources/empathicVoice/types/ResumeAssistantMessage.ts b/src/api/resources/empathicVoice/types/ResumeAssistantMessage.ts index ff7af21b..1163363e 100644 --- a/src/api/resources/empathicVoice/types/ResumeAssistantMessage.ts +++ b/src/api/resources/empathicVoice/types/ResumeAssistantMessage.ts @@ -1,5 +1,7 @@ // This file was auto-generated by Fern from our API Definition. +import type * as Hume from "../../../index.js"; + /** * **Resume responses from EVI.** Chat history sent while paused will now be sent. * @@ -7,11 +9,11 @@ */ export interface ResumeAssistantMessage { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. 
*/ - customSessionId?: string; + customSessionId?: string | null; /** * The type of message sent through the socket; must be `resume_assistant_message` for our server to correctly identify and process it as a Resume Assistant message. * * Upon resuming, if any audio input was sent during the pause, EVI will retain context from all messages sent but only respond to the last user message. (e.g., If you ask EVI two questions while paused and then send a `resume_assistant_message`, EVI will respond to the second question and have added the first question to its conversation context.) */ - type: "resume_assistant_message"; + type: Hume.empathicVoice.ResumeAssistantMessageType; } diff --git a/src/api/resources/empathicVoice/types/ResumeAssistantMessageType.ts b/src/api/resources/empathicVoice/types/ResumeAssistantMessageType.ts new file mode 100644 index 00000000..db751223 --- /dev/null +++ b/src/api/resources/empathicVoice/types/ResumeAssistantMessageType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; must be `resume_assistant_message` for our server to correctly identify and process it as a Resume Assistant message. + * + * Upon resuming, if any audio input was sent during the pause, EVI will retain context from all messages sent but only respond to the last user message. (e.g., If you ask EVI two questions while paused and then send a `resume_assistant_message`, EVI will respond to the second question and have added the first question to its conversation context.) 
+ */ +export const ResumeAssistantMessageType = { + ResumeAssistantMessage: "resume_assistant_message", +} as const; +export type ResumeAssistantMessageType = (typeof ResumeAssistantMessageType)[keyof typeof ResumeAssistantMessageType]; diff --git a/src/api/resources/empathicVoice/types/ReturnBuiltinTool.ts b/src/api/resources/empathicVoice/types/ReturnBuiltinTool.ts index 27b28a3a..6a9d60a9 100644 --- a/src/api/resources/empathicVoice/types/ReturnBuiltinTool.ts +++ b/src/api/resources/empathicVoice/types/ReturnBuiltinTool.ts @@ -7,7 +7,7 @@ import type * as Hume from "../../../index.js"; */ export interface ReturnBuiltinTool { /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */ - fallbackContent?: string; + fallbackContent?: string | null; /** Name applied to all versions of a particular Tool. */ name: string; /** Type of Tool. Either `BUILTIN` for natively implemented tools, like web search, or `FUNCTION` for user-defined tools. */ diff --git a/src/api/resources/empathicVoice/types/ReturnChat.ts b/src/api/resources/empathicVoice/types/ReturnChat.ts index 31d6a05e..25f96ec7 100644 --- a/src/api/resources/empathicVoice/types/ReturnChat.ts +++ b/src/api/resources/empathicVoice/types/ReturnChat.ts @@ -8,15 +8,15 @@ import type * as Hume from "../../../index.js"; export interface ReturnChat { /** Identifier for the Chat Group. Any chat resumed from this Chat will have the same `chat_group_id`. Formatted as a UUID. */ chatGroupId: string; - config?: Hume.empathicVoice.ReturnConfigSpec; + config?: Hume.empathicVoice.ReturnConfigSpec | null; /** Time at which the Chat ended. Measured in seconds since the Unix epoch. */ - endTimestamp?: number; + endTimestamp?: number | null; /** The total number of events currently in this chat. */ - eventCount?: number; + eventCount?: number | null; /** Identifier for a Chat. 
Formatted as a UUID. */ id: string; /** Stringified JSON with additional metadata about the chat. */ - metadata?: string; + metadata?: string | null; /** Time at which the Chat started. Measured in seconds since the Unix epoch. */ startTimestamp: number; /** diff --git a/src/api/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts b/src/api/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts index dd5375c1..d8e3a38e 100644 --- a/src/api/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts +++ b/src/api/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts @@ -7,15 +7,15 @@ import type * as Hume from "../../../index.js"; */ export interface ReturnChatAudioReconstruction { /** Name of the chat audio reconstruction file. */ - filename?: string; + filename?: string | null; /** Identifier for the chat. Formatted as a UUID. */ id: string; /** The timestamp of the most recent status change for this audio reconstruction, formatted milliseconds since the Unix epoch. */ - modifiedAt?: number; + modifiedAt?: number | null; /** Signed URL used to download the chat audio reconstruction file. */ - signedAudioUrl?: string; + signedAudioUrl?: string | null; /** The timestamp when the signed URL will expire, formatted as a Unix epoch milliseconds. */ - signedUrlExpirationTimestampMillis?: number; + signedUrlExpirationTimestampMillis?: number | null; /** * Indicates the current state of the audio reconstruction job. There are five possible statuses: * diff --git a/src/api/resources/empathicVoice/types/ReturnChatEvent.ts b/src/api/resources/empathicVoice/types/ReturnChatEvent.ts index 33c35496..374733d9 100644 --- a/src/api/resources/empathicVoice/types/ReturnChatEvent.ts +++ b/src/api/resources/empathicVoice/types/ReturnChatEvent.ts @@ -13,15 +13,15 @@ export interface ReturnChatEvent { * * EVI uses the prosody model to measure 48 expressions related to speech and vocal characteristics. 
These results contain a detailed emotional and tonal analysis of the audio. Scores typically range from 0 to 1, with higher values indicating a stronger confidence level in the measured attribute. */ - emotionFeatures?: string; + emotionFeatures?: string | null; /** Identifier for a Chat Event. Formatted as a UUID. */ id: string; /** The text of the Chat Event. This field contains the message content for each event type listed in the `type` field. */ - messageText?: string; + messageText?: string | null; /** Stringified JSON with additional metadata about the chat event. */ - metadata?: string; + metadata?: string | null; /** Identifier for a related chat event. Currently only seen on ASSISTANT_PROSODY events, to point back to the ASSISTANT_MESSAGE that generated these prosody scores */ - relatedEventId?: string; + relatedEventId?: string | null; /** * The role of the entity which generated the Chat Event. There are four possible values: * - `USER`: The user, capable of sending user messages and interruptions. diff --git a/src/api/resources/empathicVoice/types/ReturnChatGroup.ts b/src/api/resources/empathicVoice/types/ReturnChatGroup.ts index b8a69b1b..28c581a9 100644 --- a/src/api/resources/empathicVoice/types/ReturnChatGroup.ts +++ b/src/api/resources/empathicVoice/types/ReturnChatGroup.ts @@ -13,8 +13,8 @@ export interface ReturnChatGroup { /** Identifier for the Chat Group. Any Chat resumed from this Chat Group will have the same `chat_group_id`. Formatted as a UUID. */ id: string; /** The `chat_id` of the most recent Chat in this Chat Group. Formatted as a UUID. */ - mostRecentChatId?: string; - mostRecentConfig?: Hume.empathicVoice.ReturnConfigSpec; + mostRecentChatId?: string | null; + mostRecentConfig?: Hume.empathicVoice.ReturnConfigSpec | null; /** Time at which the most recent Chat in this Chat Group was created. Measured in seconds since the Unix epoch. */ mostRecentStartTimestamp: number; /** The total number of Chats in this Chat Group. 
*/ diff --git a/src/api/resources/empathicVoice/types/ReturnChatPagedEvents.ts b/src/api/resources/empathicVoice/types/ReturnChatPagedEvents.ts index 72c50c9f..c7dd0fcd 100644 --- a/src/api/resources/empathicVoice/types/ReturnChatPagedEvents.ts +++ b/src/api/resources/empathicVoice/types/ReturnChatPagedEvents.ts @@ -8,15 +8,15 @@ import type * as Hume from "../../../index.js"; export interface ReturnChatPagedEvents { /** Identifier for the Chat Group. Any chat resumed from this Chat will have the same `chat_group_id`. Formatted as a UUID. */ chatGroupId: string; - config?: Hume.empathicVoice.ReturnConfigSpec; + config?: Hume.empathicVoice.ReturnConfigSpec | null; /** Time at which the Chat ended. Measured in seconds since the Unix epoch. */ - endTimestamp?: number; + endTimestamp?: number | null; /** List of Chat Events for the specified `page_number` and `page_size`. */ eventsPage: Hume.empathicVoice.ReturnChatEvent[]; /** Identifier for a Chat. Formatted as a UUID. */ id: string; /** Stringified JSON with additional metadata about the chat. */ - metadata?: string; + metadata?: string | null; /** * The page number of the returned list. * diff --git a/src/api/resources/empathicVoice/types/ReturnConfig.ts b/src/api/resources/empathicVoice/types/ReturnConfig.ts index bca1809d..dcf60f66 100644 --- a/src/api/resources/empathicVoice/types/ReturnConfig.ts +++ b/src/api/resources/empathicVoice/types/ReturnConfig.ts @@ -7,30 +7,30 @@ import type * as Hume from "../../../index.js"; */ export interface ReturnConfig { /** List of built-in tools associated with this config */ - builtinTools?: (Hume.empathicVoice.ReturnBuiltinTool | undefined)[]; + builtinTools?: (Hume.empathicVoice.ReturnBuiltinTool | null)[] | null; /** The timestamp when the first version of this config was created. 
*/ createdOn?: number; - ellmModel?: Hume.empathicVoice.ReturnEllmModel; - eventMessages?: Hume.empathicVoice.ReturnEventMessageSpecs; + ellmModel?: Hume.empathicVoice.ReturnEllmModel | null; + eventMessages?: Hume.empathicVoice.ReturnEventMessageSpecs | null; /** The version of the EVI used with this config. */ eviVersion?: string; /** Identifier for a Config. Formatted as a UUID. */ id?: string; - languageModel?: Hume.empathicVoice.ReturnLanguageModel; + languageModel?: Hume.empathicVoice.ReturnLanguageModel | null; /** The timestamp when this version of the config was created. */ modifiedOn?: number; /** Name applied to all versions of a particular Config. */ name?: string; - nudges?: Hume.empathicVoice.ReturnNudgeSpec; - prompt?: Hume.empathicVoice.ReturnPrompt; - timeouts?: Hume.empathicVoice.ReturnTimeoutSpecs; + nudges?: Hume.empathicVoice.ReturnNudgeSpec | null; + prompt?: Hume.empathicVoice.ReturnPrompt | null; + timeouts?: Hume.empathicVoice.ReturnTimeoutSpecs | null; /** List of user-defined tools associated with this config. */ - tools?: (Hume.empathicVoice.ReturnUserDefinedTool | undefined)[]; + tools?: (Hume.empathicVoice.ReturnUserDefinedTool | null)[] | null; /** Version number for a Config. Version numbers should be integers. The combination of configId and version number is unique. */ version?: number; /** Description that is appended to a specific version of a Config. */ - versionDescription?: string; + versionDescription?: string | null; voice?: Hume.empathicVoice.ReturnVoice; /** Map of webhooks associated with this config. 
*/ - webhooks?: (Hume.empathicVoice.ReturnWebhookSpec | undefined)[]; + webhooks?: (Hume.empathicVoice.ReturnWebhookSpec | null)[] | null; } diff --git a/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts b/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts index a8033acf..81a8fb21 100644 --- a/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts +++ b/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts @@ -13,5 +13,5 @@ export interface ReturnConfigSpec { * * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. */ - version?: number; + version?: number | null; } diff --git a/src/api/resources/empathicVoice/types/ReturnEventMessageSpec.ts b/src/api/resources/empathicVoice/types/ReturnEventMessageSpec.ts index f2985170..f610268f 100644 --- a/src/api/resources/empathicVoice/types/ReturnEventMessageSpec.ts +++ b/src/api/resources/empathicVoice/types/ReturnEventMessageSpec.ts @@ -11,5 +11,5 @@ export interface ReturnEventMessageSpec { */ enabled: boolean; /** Text to use as the event message when the corresponding event occurs. If no text is specified, EVI will generate an appropriate message based on its current context and the system prompt. */ - text?: string; + text?: string | null; } diff --git a/src/api/resources/empathicVoice/types/ReturnEventMessageSpecs.ts b/src/api/resources/empathicVoice/types/ReturnEventMessageSpecs.ts index e2cf024a..6f636059 100644 --- a/src/api/resources/empathicVoice/types/ReturnEventMessageSpecs.ts +++ b/src/api/resources/empathicVoice/types/ReturnEventMessageSpecs.ts @@ -17,9 +17,9 @@ export interface ReturnEventMessageSpecs { * * However, if the inactivity message is not enabled, then reaching the inactivity timeout will immediately end the connection. 
*/ - onInactivityTimeout?: Hume.empathicVoice.ReturnEventMessageSpec; + onInactivityTimeout?: Hume.empathicVoice.ReturnEventMessageSpec | null; /** Specifies the message EVI provides when the chat is disconnected due to reaching the maximum chat duration, such as a message mentioning the time limit for the chat has been reached. */ - onMaxDurationTimeout?: Hume.empathicVoice.ReturnEventMessageSpec; + onMaxDurationTimeout?: Hume.empathicVoice.ReturnEventMessageSpec | null; /** Specifies the initial message EVI provides when a new chat is started, such as a greeting or welcome message. */ - onNewChat?: Hume.empathicVoice.ReturnEventMessageSpec; + onNewChat?: Hume.empathicVoice.ReturnEventMessageSpec | null; } diff --git a/src/api/resources/empathicVoice/types/ReturnLanguageModel.ts b/src/api/resources/empathicVoice/types/ReturnLanguageModel.ts index 69e3fa4d..b10040af 100644 --- a/src/api/resources/empathicVoice/types/ReturnLanguageModel.ts +++ b/src/api/resources/empathicVoice/types/ReturnLanguageModel.ts @@ -15,5 +15,5 @@ export interface ReturnLanguageModel { * * Controls the randomness of the LLM's output, with values closer to 0 yielding focused, deterministic responses and values closer to 1 producing more creative, diverse responses. */ - temperature?: number; + temperature?: number | null; } diff --git a/src/api/resources/empathicVoice/types/ReturnNudgeSpec.ts b/src/api/resources/empathicVoice/types/ReturnNudgeSpec.ts index 60d23fd2..cb1f9bd2 100644 --- a/src/api/resources/empathicVoice/types/ReturnNudgeSpec.ts +++ b/src/api/resources/empathicVoice/types/ReturnNudgeSpec.ts @@ -7,5 +7,5 @@ export interface ReturnNudgeSpec { /** EVI will nudge user after inactivity */ enabled: boolean; /** Time interval in seconds after which the nudge will be sent. 
*/ - intervalSecs?: number; + intervalSecs?: number | null; } diff --git a/src/api/resources/empathicVoice/types/ReturnPagedPrompts.ts b/src/api/resources/empathicVoice/types/ReturnPagedPrompts.ts index 79482900..3c34fd2b 100644 --- a/src/api/resources/empathicVoice/types/ReturnPagedPrompts.ts +++ b/src/api/resources/empathicVoice/types/ReturnPagedPrompts.ts @@ -19,7 +19,7 @@ export interface ReturnPagedPrompts { */ pageSize: number; /** List of prompts returned for the specified `page_number` and `page_size`. */ - promptsPage: (Hume.empathicVoice.ReturnPrompt | undefined)[]; + promptsPage: (Hume.empathicVoice.ReturnPrompt | null)[]; /** The total number of pages in the collection. */ totalPages: number; } diff --git a/src/api/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts b/src/api/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts index e11099f8..b0b2e1ad 100644 --- a/src/api/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts +++ b/src/api/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts @@ -19,7 +19,7 @@ export interface ReturnPagedUserDefinedTools { */ pageSize: number; /** List of tools returned for the specified `page_number` and `page_size`. */ - toolsPage: (Hume.empathicVoice.ReturnUserDefinedTool | undefined)[]; + toolsPage: (Hume.empathicVoice.ReturnUserDefinedTool | null)[]; /** The total number of pages in the collection. */ totalPages: number; } diff --git a/src/api/resources/empathicVoice/types/ReturnPrompt.ts b/src/api/resources/empathicVoice/types/ReturnPrompt.ts index e6c440d8..3096f90b 100644 --- a/src/api/resources/empathicVoice/types/ReturnPrompt.ts +++ b/src/api/resources/empathicVoice/types/ReturnPrompt.ts @@ -17,7 +17,7 @@ export interface ReturnPrompt { /** Version number for a Prompt. Version numbers should be integers. The combination of configId and version number is unique. */ version: number; /** Description that is appended to a specific version of a Prompt. 
*/ - versionDescription?: string; + versionDescription?: string | null; /** Indicates whether this prompt is using a fixed version number or auto-updating to the latest version. Values from the VersionType enum. */ versionType: string; } diff --git a/src/api/resources/empathicVoice/types/ReturnTimeoutSpec.ts b/src/api/resources/empathicVoice/types/ReturnTimeoutSpec.ts index 241557e6..6fae7d5f 100644 --- a/src/api/resources/empathicVoice/types/ReturnTimeoutSpec.ts +++ b/src/api/resources/empathicVoice/types/ReturnTimeoutSpec.ts @@ -5,7 +5,7 @@ */ export interface ReturnTimeoutSpec { /** Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). */ - durationSecs?: number; + durationSecs?: number | null; /** * Boolean indicating if this timeout is enabled. * diff --git a/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts b/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts index 281e4317..7878bd0a 100644 --- a/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts +++ b/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts @@ -9,9 +9,9 @@ export interface ReturnUserDefinedTool { /** Time at which the Tool was created. Measured in seconds since the Unix epoch. */ createdOn: number; /** An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. */ - description?: string; + description?: string | null; /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */ - fallbackContent?: string; + fallbackContent?: string | null; /** Identifier for a Tool. Formatted as a UUID. */ id: string; /** Time at which the Tool was last modified. Measured in seconds since the Unix epoch. 
*/ @@ -35,7 +35,7 @@ export interface ReturnUserDefinedTool { */ version: number; /** An optional description of the Tool version. */ - versionDescription?: string; + versionDescription?: string | null; /** Versioning method for a Tool. Either `FIXED` for using a fixed version number or `LATEST` for auto-updating to the latest version. */ versionType: Hume.empathicVoice.ReturnUserDefinedToolVersionType; } diff --git a/src/api/resources/empathicVoice/types/SessionSettings.ts b/src/api/resources/empathicVoice/types/SessionSettings.ts index b96d1066..f9fed969 100644 --- a/src/api/resources/empathicVoice/types/SessionSettings.ts +++ b/src/api/resources/empathicVoice/types/SessionSettings.ts @@ -13,7 +13,7 @@ export interface SessionSettings { * * This optional field is only required when the audio input is encoded in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For detailed instructions on how to configure session settings for PCM Linear 16 audio, please refer to the [Session Settings guide](/docs/speech-to-speech-evi/configuration/session-settings). */ - audio?: Hume.empathicVoice.AudioConfiguration; + audio?: Hume.empathicVoice.AudioConfiguration | null; /** * List of built-in tools to enable for the session. * @@ -21,7 +21,7 @@ export interface SessionSettings { * * Currently, the only built-in tool Hume provides is **Web Search**. When enabled, Web Search equips EVI with the ability to search the web for up-to-date information. */ - builtinTools?: Hume.empathicVoice.BuiltinToolConfig[]; + builtinTools?: Hume.empathicVoice.BuiltinToolConfig[] | null; /** * Field for injecting additional context into the conversation, which is appended to the end of user messages for the session. * @@ -29,7 +29,7 @@ export interface SessionSettings { * * Set to `null` to clear injected context. */ - context?: Hume.empathicVoice.Context; + context?: Hume.empathicVoice.Context | null; /** * Unique identifier for the session. 
Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. * @@ -37,14 +37,14 @@ export interface SessionSettings { * * It is recommended to pass a `custom_session_id` if you are using a Custom Language Model. Please see our guide to [using a custom language model](/docs/speech-to-speech-evi/guides/custom-language-model) with EVI to learn more. */ - customSessionId?: string; + customSessionId?: string | null; /** * Third party API key for the supplemental language model. * * When provided, EVI will use this key instead of Hume's API key for the supplemental LLM. This allows you to bypass rate limits and utilize your own API key as needed. */ - languageModelApiKey?: string; - metadata?: Record; + languageModelApiKey?: string | null; + metadata?: Record | null; /** * Instructions used to shape EVI's behavior, responses, and style for the session. * @@ -54,13 +54,13 @@ export interface SessionSettings { * * For help writing a system prompt, see our [Prompting Guide](/docs/speech-to-speech-evi/guides/prompting). */ - systemPrompt?: string; + systemPrompt?: string | null; /** * List of user-defined tools to enable for the session. * * Tools are resources used by EVI to perform various tasks, such as searching the web or calling external APIs. Built-in tools, like web search, are natively integrated, while user-defined tools are created and invoked by the user. To learn more, see our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use). */ - tools?: Hume.empathicVoice.Tool[]; + tools?: Hume.empathicVoice.Tool[] | null; /** * The type of message sent through the socket; must be `session_settings` for our server to correctly identify and process it as a Session Settings message. * @@ -68,7 +68,7 @@ export interface SessionSettings { * * For more information, please refer to the [Session Settings guide](/docs/speech-to-speech-evi/configuration/session-settings). 
*/ - type: "session_settings"; + type: Hume.empathicVoice.SessionSettingsType; /** * This field allows you to assign values to dynamic variables referenced in your system prompt. * @@ -78,5 +78,5 @@ export interface SessionSettings { */ variables?: Record; /** Allows you to change the voice during an active chat. Updating the voice does not affect chat context or conversation history. */ - voiceId?: string; + voiceId?: string | null; } diff --git a/src/api/resources/empathicVoice/types/SessionSettingsType.ts b/src/api/resources/empathicVoice/types/SessionSettingsType.ts new file mode 100644 index 00000000..5a5ffedb --- /dev/null +++ b/src/api/resources/empathicVoice/types/SessionSettingsType.ts @@ -0,0 +1,13 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; must be `session_settings` for our server to correctly identify and process it as a Session Settings message. + * + * Session settings are temporary and apply only to the current Chat session. These settings can be adjusted dynamically based on the requirements of each session to ensure optimal performance and user experience. + * + * For more information, please refer to the [Session Settings guide](/docs/speech-to-speech-evi/configuration/session-settings). + */ +export const SessionSettingsType = { + SessionSettings: "session_settings", +} as const; +export type SessionSettingsType = (typeof SessionSettingsType)[keyof typeof SessionSettingsType]; diff --git a/src/api/resources/empathicVoice/types/SubscribeEvent.ts b/src/api/resources/empathicVoice/types/SubscribeEvent.ts index 792b30e6..0c798301 100644 --- a/src/api/resources/empathicVoice/types/SubscribeEvent.ts +++ b/src/api/resources/empathicVoice/types/SubscribeEvent.ts @@ -51,9 +51,4 @@ export type SubscribeEvent = * **Error message from the tool call**, not exposed to the LLM or user. 
Upon receiving a Tool Call message and failing to invoke the function, this message is sent to notify EVI of the tool's failure. * * For built-in tools implemented on the server, you will receive this message type rather than a `ToolCallMessage` if the tool fails. See our [Tool Use Guide](/docs/speech-to-speech-evi/features/tool-use) for further details. */ - | Hume.empathicVoice.ToolErrorMessage - /** - * **Settings for this chat session.** Session settings are temporary and apply only to the current Chat session. - * - * These settings can be adjusted dynamically based on the requirements of each session to ensure optimal performance and user experience. See our [Session Settings Guide](/docs/speech-to-speech-evi/configuration/session-settings) for a complete list of configurable settings. */ - | Hume.empathicVoice.SessionSettings; + | Hume.empathicVoice.ToolErrorMessage; diff --git a/src/api/resources/empathicVoice/types/Tool.ts b/src/api/resources/empathicVoice/types/Tool.ts index fde5ad28..d1699381 100644 --- a/src/api/resources/empathicVoice/types/Tool.ts +++ b/src/api/resources/empathicVoice/types/Tool.ts @@ -4,9 +4,9 @@ import type * as Hume from "../../../index.js"; export interface Tool { /** An optional description of what the tool does, used by the supplemental LLM to choose when and how to call the function. */ - description?: string; + description?: string | null; /** Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. */ - fallbackContent?: string; + fallbackContent?: string | null; /** Name of the user-defined tool to be enabled. 
*/ name: string; /** diff --git a/src/api/resources/empathicVoice/types/ToolCallMessage.ts b/src/api/resources/empathicVoice/types/ToolCallMessage.ts index 4c7205b6..66dd3550 100644 --- a/src/api/resources/empathicVoice/types/ToolCallMessage.ts +++ b/src/api/resources/empathicVoice/types/ToolCallMessage.ts @@ -7,7 +7,7 @@ import type * as Hume from "../../../index.js"; */ export interface ToolCallMessage { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** Name of the tool called. */ name: string; /** Parameters of the tool call. Is a stringified JSON schema. */ @@ -27,5 +27,5 @@ export interface ToolCallMessage { * * This message indicates that the supplemental LLM has detected a need to invoke the specified tool. */ - type?: "tool_call"; + type?: Hume.empathicVoice.ToolCallMessageType; } diff --git a/src/api/resources/empathicVoice/types/ToolCallMessageType.ts b/src/api/resources/empathicVoice/types/ToolCallMessageType.ts new file mode 100644 index 00000000..5b254c48 --- /dev/null +++ b/src/api/resources/empathicVoice/types/ToolCallMessageType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for a Tool Call message, this must be `tool_call`. + * + * This message indicates that the supplemental LLM has detected a need to invoke the specified tool. 
+ */ +export const ToolCallMessageType = { + ToolCall: "tool_call", +} as const; +export type ToolCallMessageType = (typeof ToolCallMessageType)[keyof typeof ToolCallMessageType]; diff --git a/src/api/resources/empathicVoice/types/ToolErrorMessage.ts b/src/api/resources/empathicVoice/types/ToolErrorMessage.ts index 677321ee..e192ac49 100644 --- a/src/api/resources/empathicVoice/types/ToolErrorMessage.ts +++ b/src/api/resources/empathicVoice/types/ToolErrorMessage.ts @@ -9,15 +9,15 @@ import type * as Hume from "../../../index.js"; */ export interface ToolErrorMessage { /** Error code. Identifies the type of error encountered. */ - code?: string; + code?: string | null; /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the tool errors. */ - content?: string; + content?: string | null; /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** Error message from the tool call, not exposed to the LLM or user. */ error: string; /** Indicates the severity of an error; for a Tool Error message, this must be `warn` to signal an unexpected event. */ - level?: Hume.empathicVoice.ErrorLevel; + level?: Hume.empathicVoice.ErrorLevel | null; /** * The unique identifier for a specific tool call instance. * @@ -25,11 +25,11 @@ export interface ToolErrorMessage { */ toolCallId: string; /** Type of tool called. Either `builtin` for natively implemented tools, like web search, or `function` for user-defined tools. */ - toolType?: Hume.empathicVoice.ToolType; + toolType?: Hume.empathicVoice.ToolType | null; /** * The type of message sent through the socket; for a Tool Error message, this must be `tool_error`. 
* * Upon receiving a [Tool Call message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) and failing to invoke the function, this message is sent to notify EVI of the tool's failure. */ - type: "tool_error"; + type: Hume.empathicVoice.ToolErrorMessageType; } diff --git a/src/api/resources/empathicVoice/types/ToolErrorMessageType.ts b/src/api/resources/empathicVoice/types/ToolErrorMessageType.ts new file mode 100644 index 00000000..3b2f9db3 --- /dev/null +++ b/src/api/resources/empathicVoice/types/ToolErrorMessageType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for a Tool Error message, this must be `tool_error`. + * + * Upon receiving a [Tool Call message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) and failing to invoke the function, this message is sent to notify EVI of the tool's failure. + */ +export const ToolErrorMessageType = { + ToolError: "tool_error", +} as const; +export type ToolErrorMessageType = (typeof ToolErrorMessageType)[keyof typeof ToolErrorMessageType]; diff --git a/src/api/resources/empathicVoice/types/ToolResponseMessage.ts b/src/api/resources/empathicVoice/types/ToolResponseMessage.ts index 9e0acbb9..3e7df055 100644 --- a/src/api/resources/empathicVoice/types/ToolResponseMessage.ts +++ b/src/api/resources/empathicVoice/types/ToolResponseMessage.ts @@ -11,7 +11,7 @@ export interface ToolResponseMessage { /** Return value of the tool call. Contains the output generated by the tool to pass back to EVI. */ content: string; /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** * The unique identifier for a specific tool call instance. 
* @@ -23,13 +23,13 @@ export interface ToolResponseMessage { * * Include this optional field to help the supplemental LLM identify which tool generated the response. The specified `tool_name` must match the one received in the [Tool Call message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage). */ - toolName?: string; + toolName?: string | null; /** Type of tool called. Either `builtin` for natively implemented tools, like web search, or `function` for user-defined tools. */ - toolType?: Hume.empathicVoice.ToolType; + toolType?: Hume.empathicVoice.ToolType | null; /** * The type of message sent through the socket; for a Tool Response message, this must be `tool_response`. * * Upon receiving a [Tool Call message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) and successfully invoking the function, this message is sent to convey the result of the function call back to EVI. */ - type: "tool_response"; + type: Hume.empathicVoice.ToolResponseMessageType; } diff --git a/src/api/resources/empathicVoice/types/ToolResponseMessageType.ts b/src/api/resources/empathicVoice/types/ToolResponseMessageType.ts new file mode 100644 index 00000000..0710737c --- /dev/null +++ b/src/api/resources/empathicVoice/types/ToolResponseMessageType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for a Tool Response message, this must be `tool_response`. + * + * Upon receiving a [Tool Call message](/reference/speech-to-speech-evi/chat#receive.ToolCallMessage) and successfully invoking the function, this message is sent to convey the result of the function call back to EVI. 
+ */ +export const ToolResponseMessageType = { + ToolResponse: "tool_response", +} as const; +export type ToolResponseMessageType = (typeof ToolResponseMessageType)[keyof typeof ToolResponseMessageType]; diff --git a/src/api/resources/empathicVoice/types/UserInput.ts b/src/api/resources/empathicVoice/types/UserInput.ts index 7348b572..c9406041 100644 --- a/src/api/resources/empathicVoice/types/UserInput.ts +++ b/src/api/resources/empathicVoice/types/UserInput.ts @@ -1,5 +1,7 @@ // This file was auto-generated by Fern from our API Definition. +import type * as Hume from "../../../index.js"; + /** * **User text to insert into the conversation.** Text sent through a User Input message is treated as the user's speech to EVI. EVI processes this input and provides a corresponding response. * @@ -7,9 +9,9 @@ */ export interface UserInput { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** User text to insert into the conversation. Text sent through a User Input message is treated as the user's speech to EVI. EVI processes this input and provides a corresponding response. */ text: string; /** The type of message sent through the socket; must be `user_input` for our server to correctly identify and process it as a User Input message. */ - type: "user_input"; + type: Hume.empathicVoice.UserInputType; } diff --git a/src/api/resources/empathicVoice/types/UserInputType.ts b/src/api/resources/empathicVoice/types/UserInputType.ts new file mode 100644 index 00000000..a5a2b8e9 --- /dev/null +++ b/src/api/resources/empathicVoice/types/UserInputType.ts @@ -0,0 +1,7 @@ +// This file was auto-generated by Fern from our API Definition. + +/** The type of message sent through the socket; must be `user_input` for our server to correctly identify and process it as a User Input message. 
*/ +export const UserInputType = { + UserInput: "user_input", +} as const; +export type UserInputType = (typeof UserInputType)[keyof typeof UserInputType]; diff --git a/src/api/resources/empathicVoice/types/UserInterruption.ts b/src/api/resources/empathicVoice/types/UserInterruption.ts index 8b299bb1..d9adf35b 100644 --- a/src/api/resources/empathicVoice/types/UserInterruption.ts +++ b/src/api/resources/empathicVoice/types/UserInterruption.ts @@ -1,5 +1,7 @@ // This file was auto-generated by Fern from our API Definition. +import type * as Hume from "../../../index.js"; + /** * **Indicates the user has interrupted the assistant's response.** EVI detects the interruption in real-time and sends this message to signal the interruption event. * @@ -7,7 +9,7 @@ */ export interface UserInterruption { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** Unix timestamp of the detected user interruption. */ time: number; /** @@ -15,5 +17,5 @@ export interface UserInterruption { * * This message indicates the user has interrupted the assistant's response. EVI detects the interruption in real-time and sends this message to signal the interruption event. This message allows the system to stop the current audio playback, clear the audio queue, and prepare to handle new user input. */ - type: "user_interruption"; + type: Hume.empathicVoice.UserInterruptionType; } diff --git a/src/api/resources/empathicVoice/types/UserInterruptionType.ts b/src/api/resources/empathicVoice/types/UserInterruptionType.ts new file mode 100644 index 00000000..a01ac030 --- /dev/null +++ b/src/api/resources/empathicVoice/types/UserInterruptionType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for a User Interruption message, this must be `user_interruption`. 
+ * + * This message indicates the user has interrupted the assistant's response. EVI detects the interruption in real-time and sends this message to signal the interruption event. This message allows the system to stop the current audio playback, clear the audio queue, and prepare to handle new user input. + */ +export const UserInterruptionType = { + UserInterruption: "user_interruption", +} as const; +export type UserInterruptionType = (typeof UserInterruptionType)[keyof typeof UserInterruptionType]; diff --git a/src/api/resources/empathicVoice/types/UserMessage.ts b/src/api/resources/empathicVoice/types/UserMessage.ts index a8da2cb7..5b1ae64a 100644 --- a/src/api/resources/empathicVoice/types/UserMessage.ts +++ b/src/api/resources/empathicVoice/types/UserMessage.ts @@ -9,7 +9,7 @@ import type * as Hume from "../../../index.js"; */ export interface UserMessage { /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */ - customSessionId?: string; + customSessionId?: string | null; /** Indicates if this message was inserted into the conversation as text from a [User Input](/reference/speech-to-speech-evi/chat#send.UserInput.text) message. */ fromText: boolean; /** @@ -22,7 +22,7 @@ export interface UserMessage { */ interim: boolean; /** Detected language of the message text. */ - language?: string; + language?: string | null; /** Transcript of the message. */ message: Hume.empathicVoice.ChatMessage; /** Inference model results. */ @@ -34,5 +34,5 @@ export interface UserMessage { * * This message contains both a transcript of the user's input and the expression measurement predictions if the input was sent as an [Audio Input message](/reference/speech-to-speech-evi/chat#send.AudioInput). Expression measurement predictions are not provided for a [User Input message](/reference/speech-to-speech-evi/chat#send.UserInput), as the prosody model relies on audio input and cannot process text alone. 
*/ - type: "user_message"; + type: Hume.empathicVoice.UserMessageType; } diff --git a/src/api/resources/empathicVoice/types/UserMessageType.ts b/src/api/resources/empathicVoice/types/UserMessageType.ts new file mode 100644 index 00000000..9a4c8709 --- /dev/null +++ b/src/api/resources/empathicVoice/types/UserMessageType.ts @@ -0,0 +1,11 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * The type of message sent through the socket; for a User Message, this must be `user_message`. + * + * This message contains both a transcript of the user's input and the expression measurement predictions if the input was sent as an [Audio Input message](/reference/speech-to-speech-evi/chat#send.AudioInput). Expression measurement predictions are not provided for a [User Input message](/reference/speech-to-speech-evi/chat#send.UserInput), as the prosody model relies on audio input and cannot process text alone. + */ +export const UserMessageType = { + UserMessage: "user_message", +} as const; +export type UserMessageType = (typeof UserMessageType)[keyof typeof UserMessageType]; diff --git a/src/api/resources/empathicVoice/types/WebSocketError.ts b/src/api/resources/empathicVoice/types/WebSocketError.ts index 850b6b2d..aefe2bf1 100644 --- a/src/api/resources/empathicVoice/types/WebSocketError.ts +++ b/src/api/resources/empathicVoice/types/WebSocketError.ts @@ -1,5 +1,7 @@ // This file was auto-generated by Fern from our API Definition. +import type * as Hume from "../../../index.js"; + /** * **Indicates a disruption in the WebSocket connection**, such as an unexpected disconnection, protocol error, or data transmission issue. * @@ -9,11 +11,11 @@ export interface WebSocketError { /** Error code. Identifies the type of error encountered. */ code: string; /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. 
*/ - customSessionId?: string; + customSessionId?: string | null; /** Detailed description of the error. */ message: string; /** ID of the initiating request. */ - requestId?: string; + requestId?: string | null; /** Short, human-readable identifier and description for the error. See a complete list of error slugs on the [Errors page](/docs/resources/errors). */ slug: string; /** @@ -21,5 +23,5 @@ export interface WebSocketError { * * This message indicates a disruption in the WebSocket connection, such as an unexpected disconnection, protocol error, or data transmission issue. */ - type: "error"; + type: Hume.empathicVoice.ErrorType; } diff --git a/src/api/resources/empathicVoice/types/WebhookEventBase.ts b/src/api/resources/empathicVoice/types/WebhookBaseEvent.ts similarity index 84% rename from src/api/resources/empathicVoice/types/WebhookEventBase.ts rename to src/api/resources/empathicVoice/types/WebhookBaseEvent.ts index c201a16f..20a7ff35 100644 --- a/src/api/resources/empathicVoice/types/WebhookEventBase.ts +++ b/src/api/resources/empathicVoice/types/WebhookBaseEvent.ts @@ -3,11 +3,11 @@ /** * Represents the fields common to all webhook events. */ -export interface WebhookEventBase { +export interface WebhookBaseEvent { /** Unique ID of the **Chat Group** associated with the **Chat** session. */ chatGroupId: string; /** Unique ID of the **Chat** session. */ chatId: string; /** Unique ID of the EVI **Config** used for the session. 
*/ - configId?: string; + configId: string | null; } diff --git a/src/api/resources/empathicVoice/types/WebhookEventChatEnded.ts b/src/api/resources/empathicVoice/types/WebhookEventChatEnded.ts index 1371f1fc..80ae21ed 100644 --- a/src/api/resources/empathicVoice/types/WebhookEventChatEnded.ts +++ b/src/api/resources/empathicVoice/types/WebhookEventChatEnded.ts @@ -2,11 +2,11 @@ import type * as Hume from "../../../index.js"; -export interface WebhookEventChatEnded extends Hume.empathicVoice.WebhookEventBase { +export interface WebhookEventChatEnded extends Hume.empathicVoice.WebhookBaseEvent { /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */ - callerNumber?: string; + callerNumber: string | null; /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */ - customSessionId?: string; + customSessionId: string | null; /** Total duration of the session in seconds. */ durationSeconds: number; /** Reason for the session's termination. */ @@ -14,5 +14,5 @@ export interface WebhookEventChatEnded extends Hume.empathicVoice.WebhookEventBa /** Unix timestamp (in milliseconds) indicating when the session ended. */ endTime: number; /** Always `chat_ended`. */ - eventName?: "chat_ended"; + eventName?: Hume.empathicVoice.WebhookEventChatEndedEventName; } diff --git a/src/api/resources/empathicVoice/types/WebhookEventChatEndedEventName.ts b/src/api/resources/empathicVoice/types/WebhookEventChatEndedEventName.ts new file mode 100644 index 00000000..831635ae --- /dev/null +++ b/src/api/resources/empathicVoice/types/WebhookEventChatEndedEventName.ts @@ -0,0 +1,8 @@ +// This file was auto-generated by Fern from our API Definition. + +/** Always `chat_ended`. 
*/ +export const WebhookEventChatEndedEventName = { + ChatEnded: "chat_ended", +} as const; +export type WebhookEventChatEndedEventName = + (typeof WebhookEventChatEndedEventName)[keyof typeof WebhookEventChatEndedEventName]; diff --git a/src/api/resources/empathicVoice/types/WebhookEventChatStarted.ts b/src/api/resources/empathicVoice/types/WebhookEventChatStarted.ts index 8e36eeb9..f1c3270d 100644 --- a/src/api/resources/empathicVoice/types/WebhookEventChatStarted.ts +++ b/src/api/resources/empathicVoice/types/WebhookEventChatStarted.ts @@ -2,15 +2,15 @@ import type * as Hume from "../../../index.js"; -export interface WebhookEventChatStarted extends Hume.empathicVoice.WebhookEventBase { +export interface WebhookEventChatStarted extends Hume.empathicVoice.WebhookBaseEvent { /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */ - callerNumber?: string; + callerNumber: string | null; /** Indicates whether the chat is the first in a new Chat Group (`new_chat_group`) or the continuation of an existing chat group (`resumed_chat_group`). */ chatStartType: Hume.empathicVoice.WebhookEventChatStartType; /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */ - customSessionId?: string; + customSessionId: string | null; /** Always `chat_started`. */ - eventName?: "chat_started"; + eventName?: Hume.empathicVoice.WebhookEventChatStartedEventName; /** Unix timestamp (in milliseconds) indicating when the session started. 
*/ startTime: number; } diff --git a/src/api/resources/empathicVoice/types/WebhookEventChatStartedEventName.ts b/src/api/resources/empathicVoice/types/WebhookEventChatStartedEventName.ts new file mode 100644 index 00000000..a4425e96 --- /dev/null +++ b/src/api/resources/empathicVoice/types/WebhookEventChatStartedEventName.ts @@ -0,0 +1,8 @@ +// This file was auto-generated by Fern from our API Definition. + +/** Always `chat_started`. */ +export const WebhookEventChatStartedEventName = { + ChatStarted: "chat_started", +} as const; +export type WebhookEventChatStartedEventName = + (typeof WebhookEventChatStartedEventName)[keyof typeof WebhookEventChatStartedEventName]; diff --git a/src/api/resources/empathicVoice/types/WebhookEventToolCall.ts b/src/api/resources/empathicVoice/types/WebhookEventToolCall.ts index 4fcc3f58..eb178a93 100644 --- a/src/api/resources/empathicVoice/types/WebhookEventToolCall.ts +++ b/src/api/resources/empathicVoice/types/WebhookEventToolCall.ts @@ -2,13 +2,13 @@ import type * as Hume from "../../../index.js"; -export interface WebhookEventToolCall extends Hume.empathicVoice.WebhookEventBase { +export interface WebhookEventToolCall extends Hume.empathicVoice.WebhookBaseEvent { /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */ - callerNumber?: string; + callerNumber: string | null; /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */ - customSessionId?: string; + customSessionId: string | null; /** Always `tool_call`. */ - eventName?: "tool_call"; + eventName?: Hume.empathicVoice.WebhookEventToolCallEventName; /** Unix timestamp (in milliseconds) indicating when the tool call was triggered. */ timestamp: number; /** The tool call. 
*/ diff --git a/src/api/resources/empathicVoice/types/WebhookEventToolCallEventName.ts b/src/api/resources/empathicVoice/types/WebhookEventToolCallEventName.ts new file mode 100644 index 00000000..8f57142a --- /dev/null +++ b/src/api/resources/empathicVoice/types/WebhookEventToolCallEventName.ts @@ -0,0 +1,8 @@ +// This file was auto-generated by Fern from our API Definition. + +/** Always `tool_call`. */ +export const WebhookEventToolCallEventName = { + ToolCall: "tool_call", +} as const; +export type WebhookEventToolCallEventName = + (typeof WebhookEventToolCallEventName)[keyof typeof WebhookEventToolCallEventName]; diff --git a/src/api/resources/empathicVoice/types/index.ts b/src/api/resources/empathicVoice/types/index.ts index 378af84a..28c305ac 100644 --- a/src/api/resources/empathicVoice/types/index.ts +++ b/src/api/resources/empathicVoice/types/index.ts @@ -1,7 +1,9 @@ export * from "./AssistantEnd.js"; export * from "./AssistantInput.js"; +export * from "./AssistantInputType.js"; export * from "./AssistantMessage.js"; export * from "./AssistantProsody.js"; +export * from "./AssistantProsodyType.js"; export * from "./AudioConfiguration.js"; export * from "./AudioInput.js"; export * from "./AudioOutput.js"; @@ -10,6 +12,7 @@ export * from "./BuiltinToolConfig.js"; export * from "./ChatMessage.js"; export * from "./ChatMessageToolResult.js"; export * from "./ChatMetadata.js"; +export * from "./ChatMetadataType.js"; export * from "./ConnectSessionSettings.js"; export * from "./ConnectSessionSettingsAudio.js"; export * from "./ConnectSessionSettingsContext.js"; @@ -21,6 +24,7 @@ export * from "./EmotionScores.js"; export * from "./Encoding.js"; export * from "./ErrorLevel.js"; export * from "./ErrorResponse.js"; +export * from "./ErrorType.js"; export * from "./HttpValidationError.js"; export * from "./Inference.js"; export * from "./JsonMessage.js"; @@ -28,6 +32,7 @@ export * from "./LanguageModelType.js"; export * from "./MillisecondInterval.js"; export * from 
"./ModelProviderEnum.js"; export * from "./PauseAssistantMessage.js"; +export * from "./PauseAssistantMessageType.js"; export * from "./PostedBuiltinTool.js"; export * from "./PostedBuiltinToolName.js"; export * from "./PostedConfigPromptSpec.js"; @@ -45,6 +50,7 @@ export * from "./PostedWebhookEventType.js"; export * from "./PostedWebhookSpec.js"; export * from "./ProsodyInference.js"; export * from "./ResumeAssistantMessage.js"; +export * from "./ResumeAssistantMessageType.js"; export * from "./ReturnBuiltinTool.js"; export * from "./ReturnBuiltinToolToolType.js"; export * from "./ReturnChat.js"; @@ -90,27 +96,37 @@ export * from "./ReturnWebhookEventType.js"; export * from "./ReturnWebhookSpec.js"; export * from "./Role.js"; export * from "./SessionSettings.js"; +export * from "./SessionSettingsType.js"; export * from "./SessionSettingsVariablesValue.js"; export * from "./SubscribeEvent.js"; export * from "./Tool.js"; export * from "./ToolCallMessage.js"; +export * from "./ToolCallMessageType.js"; export * from "./ToolErrorMessage.js"; +export * from "./ToolErrorMessageType.js"; export * from "./ToolResponseMessage.js"; +export * from "./ToolResponseMessageType.js"; export * from "./ToolType.js"; export * from "./UserInput.js"; +export * from "./UserInputType.js"; export * from "./UserInterruption.js"; +export * from "./UserInterruptionType.js"; export * from "./UserMessage.js"; +export * from "./UserMessageType.js"; export * from "./ValidationError.js"; export * from "./ValidationErrorLocItem.js"; export * from "./VoiceId.js"; export * from "./VoiceName.js"; export * from "./VoiceProvider.js"; export * from "./VoiceRef.js"; +export * from "./WebhookBaseEvent.js"; export * from "./WebhookEvent.js"; -export * from "./WebhookEventBase.js"; export * from "./WebhookEventChatEnded.js"; +export * from "./WebhookEventChatEndedEventName.js"; export * from "./WebhookEventChatStarted.js"; +export * from "./WebhookEventChatStartedEventName.js"; export * from 
"./WebhookEventChatStartType.js"; export * from "./WebhookEventChatStatus.js"; export * from "./WebhookEventToolCall.js"; +export * from "./WebhookEventToolCallEventName.js"; export * from "./WebSocketError.js"; diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts b/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts index 61428d5c..4f49fad8 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts @@ -25,21 +25,21 @@ export class Batch { /** * Sort and filter jobs. * - * @param {Hume.expressionMeasurement.batch.BatchListJobsRequest} request + * @param {Hume.expressionMeasurement.batch.ListJobsBatchRequest} request * @param {Batch.RequestOptions} requestOptions - Request-specific configuration. * * @example * await client.expressionMeasurement.batch.listJobs() */ public listJobs( - request: Hume.expressionMeasurement.batch.BatchListJobsRequest = {}, + request: Hume.expressionMeasurement.batch.ListJobsBatchRequest = {}, requestOptions?: Batch.RequestOptions, ): core.HttpResponsePromise { return core.HttpResponsePromise.fromPromise(this.__listJobs(request, requestOptions)); } private async __listJobs( - request: Hume.expressionMeasurement.batch.BatchListJobsRequest = {}, + request: Hume.expressionMeasurement.batch.ListJobsBatchRequest = {}, requestOptions?: Batch.RequestOptions, ): Promise> { const { limit, status, when, timestampMs, sortBy, direction } = request; @@ -237,23 +237,26 @@ export class Batch { /** * Get the request details and state of a given job. * - * @param {string} id - The unique identifier for the job. + * @param {Hume.expressionMeasurement.batch.GetJobDetailsBatchRequest} request * @param {Batch.RequestOptions} requestOptions - Request-specific configuration. 
* * @example - * await client.expressionMeasurement.batch.getJobDetails("job_id") + * await client.expressionMeasurement.batch.getJobDetails({ + * id: "job_id" + * }) */ public getJobDetails( - id: string, + request: Hume.expressionMeasurement.batch.GetJobDetailsBatchRequest, requestOptions?: Batch.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getJobDetails(id, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__getJobDetails(request, requestOptions)); } private async __getJobDetails( - id: string, + request: Hume.expressionMeasurement.batch.GetJobDetailsBatchRequest, requestOptions?: Batch.RequestOptions, ): Promise> { + const { id } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -315,23 +318,26 @@ export class Batch { /** * Get the JSON predictions of a completed inference job. * - * @param {string} id - The unique identifier for the job. + * @param {Hume.expressionMeasurement.batch.GetJobPredictionsBatchRequest} request * @param {Batch.RequestOptions} requestOptions - Request-specific configuration. 
* * @example - * await client.expressionMeasurement.batch.getJobPredictions("job_id") + * await client.expressionMeasurement.batch.getJobPredictions({ + * id: "job_id" + * }) */ public getJobPredictions( - id: string, + request: Hume.expressionMeasurement.batch.GetJobPredictionsBatchRequest, requestOptions?: Batch.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getJobPredictions(id, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__getJobPredictions(request, requestOptions)); } private async __getJobPredictions( - id: string, + request: Hume.expressionMeasurement.batch.GetJobPredictionsBatchRequest, requestOptions?: Batch.RequestOptions, ): Promise> { + const { id } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -394,16 +400,17 @@ export class Batch { * Get the artifacts ZIP of a completed inference job. */ public getJobArtifacts( - id: string, + request: Hume.expressionMeasurement.batch.GetJobArtifactsBatchRequest, requestOptions?: Batch.RequestOptions, ): core.HttpResponsePromise { - return core.HttpResponsePromise.fromPromise(this.__getJobArtifacts(id, requestOptions)); + return core.HttpResponsePromise.fromPromise(this.__getJobArtifacts(request, requestOptions)); } private async __getJobArtifacts( - id: string, + request: Hume.expressionMeasurement.batch.GetJobArtifactsBatchRequest, requestOptions?: Batch.RequestOptions, ): Promise> { + const { id } = request; const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -457,7 +464,7 @@ export class Batch { /** * Start a new batch inference job. 
* - * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request + * @param {Hume.expressionMeasurement.batch.StartInferenceJobFromLocalFileBatchRequest} request * @param {Batch.RequestOptions} requestOptions - Request-specific configuration. * * @example @@ -467,14 +474,14 @@ export class Batch { * }) */ public startInferenceJobFromLocalFile( - request: Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest, + request: Hume.expressionMeasurement.batch.StartInferenceJobFromLocalFileBatchRequest, requestOptions?: Batch.RequestOptions, ): core.HttpResponsePromise { return core.HttpResponsePromise.fromPromise(this.__startInferenceJobFromLocalFile(request, requestOptions)); } private async __startInferenceJobFromLocalFile( - request: Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest, + request: Hume.expressionMeasurement.batch.StartInferenceJobFromLocalFileBatchRequest, requestOptions?: Batch.RequestOptions, ): Promise> { const _request = await core.newFormData(); diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobArtifactsBatchRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobArtifactsBatchRequest.ts new file mode 100644 index 00000000..ca4f4708 --- /dev/null +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobArtifactsBatchRequest.ts @@ -0,0 +1,6 @@ +// This file was auto-generated by Fern from our API Definition. + +export interface GetJobArtifactsBatchRequest { + /** The unique identifier for the job. 
*/ + id: string; +} diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobDetailsBatchRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobDetailsBatchRequest.ts new file mode 100644 index 00000000..550d469b --- /dev/null +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobDetailsBatchRequest.ts @@ -0,0 +1,12 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "job_id" + * } + */ +export interface GetJobDetailsBatchRequest { + /** The unique identifier for the job. */ + id: string; +} diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobPredictionsBatchRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobPredictionsBatchRequest.ts new file mode 100644 index 00000000..ae151caa --- /dev/null +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/GetJobPredictionsBatchRequest.ts @@ -0,0 +1,12 @@ +// This file was auto-generated by Fern from our API Definition. + +/** + * @example + * { + * id: "job_id" + * } + */ +export interface GetJobPredictionsBatchRequest { + /** The unique identifier for the job. 
*/ + id: string; +} diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchListJobsRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/ListJobsBatchRequest.ts similarity index 98% rename from src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchListJobsRequest.ts rename to src/api/resources/expressionMeasurement/resources/batch/client/requests/ListJobsBatchRequest.ts index 3ad23b30..3c503d79 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchListJobsRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/ListJobsBatchRequest.ts @@ -6,7 +6,7 @@ import type * as Hume from "../../../../../../index.js"; * @example * {} */ -export interface BatchListJobsRequest { +export interface ListJobsBatchRequest { /** The maximum number of jobs to include in the response. */ limit?: number; /** diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchStartInferenceJobFromLocalFileRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/StartInferenceJobFromLocalFileBatchRequest.ts similarity index 91% rename from src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchStartInferenceJobFromLocalFileRequest.ts rename to src/api/resources/expressionMeasurement/resources/batch/client/requests/StartInferenceJobFromLocalFileBatchRequest.ts index 0e27f33d..adeba193 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchStartInferenceJobFromLocalFileRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/StartInferenceJobFromLocalFileBatchRequest.ts @@ -9,7 +9,7 @@ import type * as Hume from "../../../../../../index.js"; * file: [fs.createReadStream("/path/to/your/file")] * } */ -export interface BatchStartInferenceJobFromLocalFileRequest { +export interface 
StartInferenceJobFromLocalFileBatchRequest { /** Stringified JSON object containing the inference job configuration. */ json?: Hume.expressionMeasurement.batch.InferenceBaseRequest; /** diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/index.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/index.ts index ef2dac7d..4035ee76 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/requests/index.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/index.ts @@ -1,2 +1,5 @@ -export type { BatchListJobsRequest } from "./BatchListJobsRequest.js"; -export type { BatchStartInferenceJobFromLocalFileRequest } from "./BatchStartInferenceJobFromLocalFileRequest.js"; +export type { GetJobArtifactsBatchRequest } from "./GetJobArtifactsBatchRequest.js"; +export type { GetJobDetailsBatchRequest } from "./GetJobDetailsBatchRequest.js"; +export type { GetJobPredictionsBatchRequest } from "./GetJobPredictionsBatchRequest.js"; +export type { ListJobsBatchRequest } from "./ListJobsBatchRequest.js"; +export type { StartInferenceJobFromLocalFileBatchRequest } from "./StartInferenceJobFromLocalFileBatchRequest.js"; diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Alternative.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Alternative.ts index fe87b797..71a20793 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Alternative.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Alternative.ts @@ -1,3 +1,6 @@ // This file was auto-generated by Fern from our API Definition. 
-export type Alternative = "language_only"; +export const Alternative = { + LanguageOnly: "language_only", +} as const; +export type Alternative = (typeof Alternative)[keyof typeof Alternative]; diff --git a/src/api/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts b/src/api/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts index f41c1014..cf69dc76 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts @@ -6,7 +6,7 @@ export interface StreamModelPredictionsFacePredictionsItem { /** Frame number */ frame?: number; /** Time in seconds when face detection occurred. */ - time?: number; + time?: number | null; bbox?: Hume.expressionMeasurement.stream.StreamBoundingBox; /** The predicted probability that a detected face was actually a face. */ prob?: number; diff --git a/src/api/resources/tts/client/requests/ConvertVoiceFileRequest.ts b/src/api/resources/tts/client/requests/ConvertVoiceFileRequest.ts index f41734eb..d514787f 100644 --- a/src/api/resources/tts/client/requests/ConvertVoiceFileRequest.ts +++ b/src/api/resources/tts/client/requests/ConvertVoiceFileRequest.ts @@ -8,7 +8,7 @@ export interface ConvertVoiceFileRequest { stripHeaders?: boolean; audio: core.file.Uploadable; /** Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. */ - context?: Hume.tts.PostedContext; + context?: Hume.tts.PostedContext | null; voice?: Hume.tts.PostedUtteranceVoice; /** Specifies the output audio file format. 
*/ format?: Hume.tts.Format; diff --git a/src/api/resources/tts/client/requests/ConvertVoiceJsonRequest.ts b/src/api/resources/tts/client/requests/ConvertVoiceJsonRequest.ts index 29b01f54..1ca42355 100644 --- a/src/api/resources/tts/client/requests/ConvertVoiceJsonRequest.ts +++ b/src/api/resources/tts/client/requests/ConvertVoiceJsonRequest.ts @@ -12,7 +12,7 @@ export interface ConvertVoiceJsonRequest { stripHeaders?: boolean; audio?: core.file.Uploadable | undefined; /** Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. */ - context?: Hume.tts.PostedContext; + context?: Hume.tts.PostedContext | null; voice?: Hume.tts.PostedUtteranceVoice; /** Specifies the output audio file format. */ format?: Hume.tts.Format; diff --git a/src/api/resources/tts/resources/streamInput/client/Client.ts.diff b/src/api/resources/tts/resources/streamInput/client/Client.ts.diff new file mode 100644 index 00000000..b53f0067 --- /dev/null +++ b/src/api/resources/tts/resources/streamInput/client/Client.ts.diff @@ -0,0 +1,20 @@ +diff --git a/src/api/resources/tts/resources/streamInput/client/Client.ts b/src/api/resources/tts/resources/streamInput/client/Client.ts +index 55e3be81..38701335 100644 +--- a/src/api/resources/tts/resources/streamInput/client/Client.ts ++++ b/src/api/resources/tts/resources/streamInput/client/Client.ts +@@ -125,12 +125,8 @@ export class StreamInput { + return new StreamInputSocket({ socket }); + } + +- protected async _getCustomAuthorizationHeaders(): Promise> { +- const apiKeyValue = core.Supplier.get(this._options.apiKey); +- // This `authHeaderValue` is manually added as if you don't provide it it will +- // be omitted from the headers which means it won't reach the logic in ws.ts that +- // extracts values from the headers and adds them to query parameters. 
+- const authHeaderValue = core.Supplier.get(this._options.headers?.authorization); +- return { "X-Hume-Api-Key": apiKeyValue, Authorization: authHeaderValue }; ++ protected async _getCustomAuthorizationHeaders(): Promise> { ++ const apiKeyValue = await core.Supplier.get(this._options.apiKey); ++ return { "X-Hume-Api-Key": apiKeyValue }; + } + } diff --git a/src/api/resources/tts/resources/voices/client/Client.ts b/src/api/resources/tts/resources/voices/client/Client.ts index d15f5ed1..e95cb171 100644 --- a/src/api/resources/tts/resources/voices/client/Client.ts +++ b/src/api/resources/tts/resources/voices/client/Client.ts @@ -24,7 +24,7 @@ export class Voices { /** * Lists voices you have saved in your account, or voices from the [Voice Library](https://platform.hume.ai/tts/voice-library). * - * @param {Hume.tts.VoicesListRequest} request + * @param {Hume.tts.ListVoicesRequest} request * @param {Voices.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.tts.BadRequestError} @@ -35,12 +35,12 @@ export class Voices { * }) */ public async list( - request: Hume.tts.VoicesListRequest, + request: Hume.tts.ListVoicesRequest, requestOptions?: Voices.RequestOptions, ): Promise> { const list = core.HttpResponsePromise.interceptFunction( - async (request: Hume.tts.VoicesListRequest): Promise> => { - const { provider, pageNumber, pageSize, ascendingOrder, filterTag } = request; + async (request: Hume.tts.ListVoicesRequest): Promise> => { + const { provider, pageNumber, pageSize, ascendingOrder } = request; const _queryParams: Record = {}; _queryParams.provider = serializers.tts.VoiceProvider.jsonOrThrow(provider, { unrecognizedObjectKeys: "strip", @@ -55,13 +55,6 @@ export class Voices { if (ascendingOrder != null) { _queryParams.ascending_order = ascendingOrder.toString(); } - if (filterTag != null) { - if (Array.isArray(filterTag)) { - _queryParams.filter_tag = filterTag.map((item) => item); - } else { - _queryParams.filter_tag = 
filterTag; - } - } const _headers: core.Fetcher.Args["headers"] = mergeHeaders( this._options?.headers, mergeOnlyDefinedHeaders({ ...(await this._getCustomAuthorizationHeaders()) }), @@ -255,7 +248,7 @@ export class Voices { /** * Deletes a previously generated custom voice. * - * @param {Hume.tts.VoicesDeleteRequest} request + * @param {Hume.tts.DeleteVoicesRequest} request * @param {Voices.RequestOptions} requestOptions - Request-specific configuration. * * @throws {@link Hume.tts.BadRequestError} @@ -266,14 +259,14 @@ export class Voices { * }) */ public delete( - request: Hume.tts.VoicesDeleteRequest, + request: Hume.tts.DeleteVoicesRequest, requestOptions?: Voices.RequestOptions, ): core.HttpResponsePromise { return core.HttpResponsePromise.fromPromise(this.__delete(request, requestOptions)); } private async __delete( - request: Hume.tts.VoicesDeleteRequest, + request: Hume.tts.DeleteVoicesRequest, requestOptions?: Voices.RequestOptions, ): Promise> { const { name } = request; diff --git a/src/api/resources/tts/resources/voices/client/requests/VoicesDeleteRequest.ts b/src/api/resources/tts/resources/voices/client/requests/DeleteVoicesRequest.ts similarity index 83% rename from src/api/resources/tts/resources/voices/client/requests/VoicesDeleteRequest.ts rename to src/api/resources/tts/resources/voices/client/requests/DeleteVoicesRequest.ts index 5d385a9d..1b7923c9 100644 --- a/src/api/resources/tts/resources/voices/client/requests/VoicesDeleteRequest.ts +++ b/src/api/resources/tts/resources/voices/client/requests/DeleteVoicesRequest.ts @@ -6,7 +6,7 @@ * name: "David Hume" * } */ -export interface VoicesDeleteRequest { +export interface DeleteVoicesRequest { /** Name of the voice to delete */ name: string; } diff --git a/src/api/resources/tts/resources/voices/client/requests/VoicesListRequest.ts b/src/api/resources/tts/resources/voices/client/requests/ListVoicesRequest.ts similarity index 94% rename from 
src/api/resources/tts/resources/voices/client/requests/VoicesListRequest.ts rename to src/api/resources/tts/resources/voices/client/requests/ListVoicesRequest.ts index e92dcdda..0764509a 100644 --- a/src/api/resources/tts/resources/voices/client/requests/VoicesListRequest.ts +++ b/src/api/resources/tts/resources/voices/client/requests/ListVoicesRequest.ts @@ -8,7 +8,7 @@ import type * as Hume from "../../../../../../index.js"; * provider: "CUSTOM_VOICE" * } */ -export interface VoicesListRequest { +export interface ListVoicesRequest { /** * Specify the voice provider to filter voices returned by the endpoint: * @@ -29,5 +29,4 @@ export interface VoicesListRequest { */ pageSize?: number; ascendingOrder?: boolean; - filterTag?: string | string[]; } diff --git a/src/api/resources/tts/resources/voices/client/requests/index.ts b/src/api/resources/tts/resources/voices/client/requests/index.ts index 4a67c65d..2ff2266d 100644 --- a/src/api/resources/tts/resources/voices/client/requests/index.ts +++ b/src/api/resources/tts/resources/voices/client/requests/index.ts @@ -1,3 +1,3 @@ +export type { DeleteVoicesRequest } from "./DeleteVoicesRequest.js"; +export type { ListVoicesRequest } from "./ListVoicesRequest.js"; export type { PostedVoice } from "./PostedVoice.js"; -export type { VoicesDeleteRequest } from "./VoicesDeleteRequest.js"; -export type { VoicesListRequest } from "./VoicesListRequest.js"; diff --git a/src/api/resources/tts/types/PostedTts.ts b/src/api/resources/tts/types/PostedTts.ts index 0d61bec5..6b6dccea 100644 --- a/src/api/resources/tts/types/PostedTts.ts +++ b/src/api/resources/tts/types/PostedTts.ts @@ -4,7 +4,7 @@ import type * as Hume from "../../../index.js"; export interface PostedTts { /** Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. 
*/ - context?: Hume.tts.PostedContext; + context?: Hume.tts.PostedContext | null; /** Specifies the output audio file format. */ format?: Hume.tts.Format; /** The set of timestamp types to include in the response. Only supported for Octave 2 requests. */ diff --git a/src/api/resources/tts/types/PostedUtterance.ts b/src/api/resources/tts/types/PostedUtterance.ts index 4eaddd4b..035fa9b6 100644 --- a/src/api/resources/tts/types/PostedUtterance.ts +++ b/src/api/resources/tts/types/PostedUtterance.ts @@ -10,7 +10,7 @@ export interface PostedUtterance { * - **Voice specified**: the description will serve as acting directions for delivery. Keep directions concise—100 characters or fewer—for best results. See our guide on [acting instructions](/docs/text-to-speech-tts/acting-instructions). * - **Voice not specified**: the description will serve as a voice prompt for generating a voice. See our [prompting guide](/docs/text-to-speech-tts/prompting) for design tips. */ - description?: string; + description?: string | null; /** Speed multiplier for the synthesized speech. Extreme values below 0.75 and above 1.5 may sometimes cause instability to the generated output. */ speed?: number; /** The input text to be synthesized into speech. */ @@ -22,5 +22,5 @@ export interface PostedUtterance { * * See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**. */ - voice?: Hume.tts.PostedUtteranceVoice; + voice?: Hume.tts.PostedUtteranceVoice | null; } diff --git a/src/api/resources/tts/types/PublishTts.ts b/src/api/resources/tts/types/PublishTts.ts index 43e7f2a8..96b32915 100644 --- a/src/api/resources/tts/types/PublishTts.ts +++ b/src/api/resources/tts/types/PublishTts.ts @@ -9,7 +9,7 @@ export interface PublishTts { /** Force the generation of audio and close the stream. 
*/ close?: boolean; /** Natural language instructions describing how the text should be spoken by the model (e.g., `"a soft, gentle voice with a strong British accent"`). */ - description?: string; + description?: string | null; /** Force the generation of audio regardless of how much text has been supplied. */ flush?: boolean; /** A relative measure of how fast this utterance should be spoken. */ @@ -19,5 +19,5 @@ export interface PublishTts { /** Duration of trailing silence (in seconds) to add to this utterance */ trailingSilence?: number; /** The name or ID of the voice from the `Voice Library` to be used as the speaker for this and all subsequent utterances, until the `"voice"` field is updated again. */ - voice?: Hume.tts.PostedUtteranceVoice; + voice?: Hume.tts.PostedUtteranceVoice | null; } diff --git a/src/api/resources/tts/types/ReturnTts.ts b/src/api/resources/tts/types/ReturnTts.ts index 37407099..2c6aad82 100644 --- a/src/api/resources/tts/types/ReturnTts.ts +++ b/src/api/resources/tts/types/ReturnTts.ts @@ -5,5 +5,5 @@ import type * as Hume from "../../../index.js"; export interface ReturnTts { generations: Hume.tts.ReturnGeneration[]; /** A unique ID associated with this request for tracking and troubleshooting. Use this ID when contacting [support](/support) for troubleshooting assistance. */ - requestId?: string; + requestId: string | null; } diff --git a/src/api/resources/tts/types/Snippet.ts b/src/api/resources/tts/types/Snippet.ts index e033c4d6..3612c040 100644 --- a/src/api/resources/tts/types/Snippet.ts +++ b/src/api/resources/tts/types/Snippet.ts @@ -14,7 +14,7 @@ export interface Snippet { /** A list of word or phoneme level timestamps for the generated audio. Timestamps are only returned for Octave 2 requests. */ timestamps: Hume.tts.Timestamp[]; /** The transcribed text of the generated audio. It is only present if `instant_mode` is set to `false`. 
*/ - transcribedText?: string; + transcribedText: string | null; /** The index of the utterance in the request this snippet corresponds to. */ - utteranceIndex?: number; + utteranceIndex: number | null; } diff --git a/src/api/resources/tts/types/SnippetAudioChunk.ts b/src/api/resources/tts/types/SnippetAudioChunk.ts index 6398e959..c9a467f6 100644 --- a/src/api/resources/tts/types/SnippetAudioChunk.ts +++ b/src/api/resources/tts/types/SnippetAudioChunk.ts @@ -24,8 +24,8 @@ export interface SnippetAudioChunk { /** The text of the parent snippet that this chunk corresponds to. */ text: string; /** The transcribed text of the generated audio of the parent snippet that this chunk corresponds to. It is only present if `instant_mode` is set to `false`. */ - transcribedText?: string; + transcribedText: string | null; type: "audio"; /** The index of the utterance in the request that the parent snippet of this chunk corresponds to. */ - utteranceIndex?: number; + utteranceIndex: number | null; } diff --git a/src/core/fetcher/Supplier.ts.diff b/src/core/fetcher/Supplier.ts.diff new file mode 100644 index 00000000..512d963b --- /dev/null +++ b/src/core/fetcher/Supplier.ts.diff @@ -0,0 +1,26 @@ +diff --git a/src/core/fetcher/Supplier.ts b/src/core/fetcher/Supplier.ts +index aa95dd85..867c931c 100644 +--- a/src/core/fetcher/Supplier.ts ++++ b/src/core/fetcher/Supplier.ts +@@ -1,19 +1,11 @@ +-/** THIS FILE IS MANUALLY MAINAINED: see .fernignore */ +-export type Supplier = T | (() => T); ++export type Supplier = T | Promise | (() => T | Promise); + + export const Supplier = { +- get: (supplier: Supplier): T => { ++ get: async (supplier: Supplier): Promise => { + if (typeof supplier === "function") { + return (supplier as () => T)(); + } else { + return supplier; + } + }, +- map: (supplier: Supplier, f: (value: T) => R): Supplier => { +- if (typeof supplier === "function") { +- return () => f(Supplier.get(supplier)); +- } else { +- return f(supplier); +- } +- }, + }; diff --git 
a/src/core/websocket/ws.ts.diff b/src/core/websocket/ws.ts.diff new file mode 100644 index 00000000..1a02cf70 --- /dev/null +++ b/src/core/websocket/ws.ts.diff @@ -0,0 +1,137 @@ +diff --git a/src/core/websocket/ws.ts b/src/core/websocket/ws.ts +index 9a16477e..cba4f692 100644 +--- a/src/core/websocket/ws.ts ++++ b/src/core/websocket/ws.ts +@@ -3,7 +3,6 @@ import { WebSocket as NodeWebSocket } from "ws"; + import { RUNTIME } from "../runtime/index.js"; + import { toQueryString } from "../url/qs.js"; + import * as Events from "./events.js"; +-import { SDK_VERSION } from "../../version.js"; + + const getGlobalWebSocket = (): WebSocket | undefined => { + if (typeof WebSocket !== "undefined") { +@@ -70,49 +69,6 @@ const DEFAULT_OPTIONS = { + debug: false, + }; + +-function addApiKeyFromHeader({ +- headers, +- queryParameters, +-}: { +- headers: Record | undefined; +- queryParameters: Record | undefined; +-}) { +- const apiKeyValue = Object.entries(headers ?? {}).find(([k]) => k.toLowerCase() === "x-hume-api-key")?.[1]; +- if (apiKeyValue && !queryParameters?.["api_key"]) { +- return { ...queryParameters, api_key: apiKeyValue }; +- } +- return queryParameters; +-} +- +-function addAccessTokenFromHeader({ +- headers, +- queryParameters, +-}: { +- headers: Record | undefined; +- queryParameters: Record | undefined; +-}) { +- const authHeaderValue = headers?.["Authorization"] || headers?.["authorization"]; +- if (!authHeaderValue) { +- return queryParameters; +- } +- if (!authHeaderValue.startsWith("Bearer ")) { +- return queryParameters; +- } +- if (queryParameters?.["access_token"]) { +- return queryParameters; +- } +- const token = authHeaderValue.substring("Bearer ".length); +- return { ...queryParameters, access_token: token }; +-} +- +-function addSdkTracking(queryParameters: Record | undefined) { +- return { +- ...queryParameters, +- fernSdkLanguage: "JavaScript", +- fernSdkVersion: SDK_VERSION, +- }; +-} +- + export class ReconnectingWebSocket { + private _ws?: 
WebSocket; + private _listeners: ReconnectingWebSocket.ListenersMap = { +@@ -141,47 +97,22 @@ export class ReconnectingWebSocket { + this._protocols = protocols; + this._options = options ?? DEFAULT_OPTIONS; + this._headers = headers; +- this._queryParameters = addSdkTracking( +- addAccessTokenFromHeader({ +- headers, +- queryParameters: addApiKeyFromHeader({ +- headers, +- queryParameters, +- }), +- }), +- ); +- ++ this._queryParameters = queryParameters; + if (this._options.startClosed) { + this._shouldReconnect = false; + } + this._connect(); + } + +- static get CONNECTING() { +- return 0; +- } +- static get OPEN() { +- return 1; +- } +- static get CLOSING() { +- return 2; +- } +- static get CLOSED() { +- return 3; +- } ++ public static readonly CONNECTING = 0; ++ public static readonly OPEN = 1; ++ public static readonly CLOSING = 2; ++ public static readonly CLOSED = 3; + +- get CONNECTING(): number { +- return ReconnectingWebSocket.CONNECTING; +- } +- get OPEN(): number { +- return ReconnectingWebSocket.OPEN; +- } +- get CLOSING(): number { +- return ReconnectingWebSocket.CLOSING; +- } +- get CLOSED(): number { +- return ReconnectingWebSocket.CLOSED; +- } ++ public readonly CONNECTING: typeof ReconnectingWebSocket.CONNECTING = ReconnectingWebSocket.CONNECTING; ++ public readonly OPEN: typeof ReconnectingWebSocket.OPEN = ReconnectingWebSocket.OPEN; ++ public readonly CLOSING: typeof ReconnectingWebSocket.CLOSING = ReconnectingWebSocket.CLOSING; ++ public readonly CLOSED: typeof ReconnectingWebSocket.CLOSED = ReconnectingWebSocket.CLOSED; + + get binaryType() { + return this._ws ? 
this._ws.binaryType : this._binaryType; +@@ -383,7 +314,7 @@ export class ReconnectingWebSocket { + } = this._options; + let delay = 0; + if (this._retryCount > 0) { +- delay = minReconnectionDelay * Math.pow(reconnectionDelayGrowFactor, this._retryCount - 1); ++ delay = minReconnectionDelay * reconnectionDelayGrowFactor ** (this._retryCount - 1); + if (delay > maxReconnectionDelay) { + delay = maxReconnectionDelay; + } +@@ -478,7 +409,7 @@ export class ReconnectingWebSocket { + try { + this._ws.close(code, reason); + this._handleClose(new Events.CloseEvent(code, reason, this)); +- } catch (error) { ++ } catch (_error) { + // ignore + } + } diff --git a/src/index.ts.diff b/src/index.ts.diff new file mode 100644 index 00000000..3c4cba14 --- /dev/null +++ b/src/index.ts.diff @@ -0,0 +1,17 @@ +diff --git a/src/index.ts b/src/index.ts +index 4d199020..c814670b 100644 +--- a/src/index.ts ++++ b/src/index.ts +@@ -1,8 +1,7 @@ + export * as Hume from "./api/index.js"; +-export * as serialization from "./serialization/index.js"; ++export type { BaseClientOptions, BaseRequestOptions } from "./BaseClient.js"; ++export { HumeClient } from "./Client.js"; ++export { HumeEnvironment, type HumeEnvironmentUrls } from "./environments.js"; + export { HumeError, HumeTimeoutError } from "./errors/index.js"; +-export { HumeEnvironment } from "./environments.js"; +-export type { HumeEnvironmentUrls } from "./environments.js"; + export * from "./exports.js"; +- +-export * from "./wrapper/index.js"; ++export * as serialization from "./serialization/index.js"; diff --git a/src/serialization/resources/empathicVoice/resources/chat/index.ts.diff b/src/serialization/resources/empathicVoice/resources/chat/index.ts.diff new file mode 100644 index 00000000..c11f606d --- /dev/null +++ b/src/serialization/resources/empathicVoice/resources/chat/index.ts.diff @@ -0,0 +1,12 @@ +diff --git a/src/serialization/resources/empathicVoice/resources/chat/index.ts 
b/src/serialization/resources/empathicVoice/resources/chat/index.ts +index f5ca113c..d9adb1af 100644 +--- a/src/serialization/resources/empathicVoice/resources/chat/index.ts ++++ b/src/serialization/resources/empathicVoice/resources/chat/index.ts +@@ -1,7 +1,2 @@ + export * from "./client/index.js"; + export * from "./types/index.js"; +-/** +- * @deprecated Use `serialization.empathicVoice.SubscribeEvent` instead. +- * This serializer alias will be removed in a future version. +- */ +-export { SubscribeEvent } from "./types/SubscribeEvent.js"; diff --git a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts index a72387ae..e9a19ecf 100644 --- a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts +++ b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts @@ -20,36 +20,39 @@ export const PostedConfig: core.serialization.Schema< > = core.serialization.object({ builtinTools: core.serialization.property( "builtin_tools", - core.serialization.list(PostedBuiltinTool.optional()).optional(), + core.serialization.list(PostedBuiltinTool.nullable()).optionalNullable(), ), - ellmModel: core.serialization.property("ellm_model", PostedEllmModel.optional()), - eventMessages: core.serialization.property("event_messages", PostedEventMessageSpecs.optional()), + ellmModel: core.serialization.property("ellm_model", PostedEllmModel.optionalNullable()), + eventMessages: core.serialization.property("event_messages", PostedEventMessageSpecs.optionalNullable()), eviVersion: core.serialization.property("evi_version", core.serialization.string()), - languageModel: core.serialization.property("language_model", PostedLanguageModel.optional()), + languageModel: core.serialization.property("language_model", PostedLanguageModel.optionalNullable()), name: core.serialization.string(), - 
nudges: PostedNudgeSpec.optional(), - prompt: PostedConfigPromptSpec.optional(), - timeouts: PostedTimeoutSpecs.optional(), - tools: core.serialization.list(PostedUserDefinedToolSpec.optional()).optional(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + nudges: PostedNudgeSpec.optionalNullable(), + prompt: PostedConfigPromptSpec.optionalNullable(), + timeouts: PostedTimeoutSpecs.optionalNullable(), + tools: core.serialization.list(PostedUserDefinedToolSpec.nullable()).optionalNullable(), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), voice: VoiceRef.optional(), - webhooks: core.serialization.list(PostedWebhookSpec.optional()).optional(), + webhooks: core.serialization.list(PostedWebhookSpec.nullable()).optionalNullable(), }); export declare namespace PostedConfig { export interface Raw { - builtin_tools?: (PostedBuiltinTool.Raw | null | undefined)[] | null; - ellm_model?: PostedEllmModel.Raw | null; - event_messages?: PostedEventMessageSpecs.Raw | null; + builtin_tools?: ((PostedBuiltinTool.Raw | null | undefined)[] | null | undefined) | null; + ellm_model?: (PostedEllmModel.Raw | null | undefined) | null; + event_messages?: (PostedEventMessageSpecs.Raw | null | undefined) | null; evi_version: string; - language_model?: PostedLanguageModel.Raw | null; + language_model?: (PostedLanguageModel.Raw | null | undefined) | null; name: string; - nudges?: PostedNudgeSpec.Raw | null; - prompt?: PostedConfigPromptSpec.Raw | null; - timeouts?: PostedTimeoutSpecs.Raw | null; - tools?: (PostedUserDefinedToolSpec.Raw | null | undefined)[] | null; - version_description?: string | null; + nudges?: (PostedNudgeSpec.Raw | null | undefined) | null; + prompt?: (PostedConfigPromptSpec.Raw | null | undefined) | null; + timeouts?: (PostedTimeoutSpecs.Raw | null | undefined) | null; + tools?: ((PostedUserDefinedToolSpec.Raw | null | 
undefined)[] | null | undefined) | null; + version_description?: (string | null | undefined) | null; voice?: VoiceRef.Raw | null; - webhooks?: (PostedWebhookSpec.Raw | null | undefined)[] | null; + webhooks?: ((PostedWebhookSpec.Raw | null | undefined)[] | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts index 20bde02f..ef1d6f6e 100644 --- a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts +++ b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigName.ts @@ -6,7 +6,7 @@ import type * as serializers from "../../../../../../index.js"; export const PostedConfigName: core.serialization.Schema< serializers.empathicVoice.PostedConfigName.Raw, - Hume.empathicVoice.PostedConfigName + Omit > = core.serialization.object({ name: core.serialization.string(), }); diff --git a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts index 6b213f78..e4a73b60 100644 --- a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts +++ b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts @@ -16,38 +16,41 @@ import { VoiceRef } from "../../../../types/VoiceRef.js"; export const PostedConfigVersion: core.serialization.Schema< serializers.empathicVoice.PostedConfigVersion.Raw, - Hume.empathicVoice.PostedConfigVersion + Omit > = core.serialization.object({ builtinTools: core.serialization.property( "builtin_tools", - core.serialization.list(PostedBuiltinTool.optional()).optional(), + core.serialization.list(PostedBuiltinTool.nullable()).optionalNullable(), ), - ellmModel: 
core.serialization.property("ellm_model", PostedEllmModel.optional()), - eventMessages: core.serialization.property("event_messages", PostedEventMessageSpecs.optional()), + ellmModel: core.serialization.property("ellm_model", PostedEllmModel.optionalNullable()), + eventMessages: core.serialization.property("event_messages", PostedEventMessageSpecs.optionalNullable()), eviVersion: core.serialization.property("evi_version", core.serialization.string()), - languageModel: core.serialization.property("language_model", PostedLanguageModel.optional()), - nudges: PostedNudgeSpec.optional(), - prompt: PostedConfigPromptSpec.optional(), - timeouts: PostedTimeoutSpecs.optional(), - tools: core.serialization.list(PostedUserDefinedToolSpec.optional()).optional(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + languageModel: core.serialization.property("language_model", PostedLanguageModel.optionalNullable()), + nudges: PostedNudgeSpec.optionalNullable(), + prompt: PostedConfigPromptSpec.optionalNullable(), + timeouts: PostedTimeoutSpecs.optionalNullable(), + tools: core.serialization.list(PostedUserDefinedToolSpec.nullable()).optionalNullable(), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), voice: VoiceRef.optional(), - webhooks: core.serialization.list(PostedWebhookSpec.optional()).optional(), + webhooks: core.serialization.list(PostedWebhookSpec.nullable()).optionalNullable(), }); export declare namespace PostedConfigVersion { export interface Raw { - builtin_tools?: (PostedBuiltinTool.Raw | null | undefined)[] | null; - ellm_model?: PostedEllmModel.Raw | null; - event_messages?: PostedEventMessageSpecs.Raw | null; + builtin_tools?: ((PostedBuiltinTool.Raw | null | undefined)[] | null | undefined) | null; + ellm_model?: (PostedEllmModel.Raw | null | undefined) | null; + event_messages?: (PostedEventMessageSpecs.Raw | null | 
undefined) | null; evi_version: string; - language_model?: PostedLanguageModel.Raw | null; - nudges?: PostedNudgeSpec.Raw | null; - prompt?: PostedConfigPromptSpec.Raw | null; - timeouts?: PostedTimeoutSpecs.Raw | null; - tools?: (PostedUserDefinedToolSpec.Raw | null | undefined)[] | null; - version_description?: string | null; + language_model?: (PostedLanguageModel.Raw | null | undefined) | null; + nudges?: (PostedNudgeSpec.Raw | null | undefined) | null; + prompt?: (PostedConfigPromptSpec.Raw | null | undefined) | null; + timeouts?: (PostedTimeoutSpecs.Raw | null | undefined) | null; + tools?: ((PostedUserDefinedToolSpec.Raw | null | undefined)[] | null | undefined) | null; + version_description?: (string | null | undefined) | null; voice?: VoiceRef.Raw | null; - webhooks?: (PostedWebhookSpec.Raw | null | undefined)[] | null; + webhooks?: ((PostedWebhookSpec.Raw | null | undefined)[] | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts index 1bacf3f1..2ff48793 100644 --- a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts +++ b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersionDescription.ts @@ -6,13 +6,16 @@ import type * as serializers from "../../../../../../index.js"; export const PostedConfigVersionDescription: core.serialization.Schema< serializers.empathicVoice.PostedConfigVersionDescription.Raw, - Hume.empathicVoice.PostedConfigVersionDescription + Omit > = core.serialization.object({ - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), }); export 
declare namespace PostedConfigVersionDescription { export interface Raw { - version_description?: string | null; + version_description?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/createPrompt.ts b/src/serialization/resources/empathicVoice/resources/prompts/client/createPrompt.ts index 4595a7cd..d1c31d86 100644 --- a/src/serialization/resources/empathicVoice/resources/prompts/client/createPrompt.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/createPrompt.ts @@ -7,8 +7,8 @@ import { ReturnPrompt } from "../../../types/ReturnPrompt.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.prompts.createPrompt.Response.Raw, - Hume.empathicVoice.ReturnPrompt | undefined -> = ReturnPrompt.optional(); + Hume.empathicVoice.ReturnPrompt | null +> = ReturnPrompt.nullable(); export declare namespace Response { export type Raw = ReturnPrompt.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/createPromptVersion.ts b/src/serialization/resources/empathicVoice/resources/prompts/client/createPromptVersion.ts index 2febf797..5821be19 100644 --- a/src/serialization/resources/empathicVoice/resources/prompts/client/createPromptVersion.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/createPromptVersion.ts @@ -7,8 +7,8 @@ import { ReturnPrompt } from "../../../types/ReturnPrompt.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.prompts.createPromptVersion.Response.Raw, - Hume.empathicVoice.ReturnPrompt | undefined -> = ReturnPrompt.optional(); + Hume.empathicVoice.ReturnPrompt | null +> = ReturnPrompt.nullable(); export declare namespace Response { export type Raw = ReturnPrompt.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/getPromptVersion.ts 
b/src/serialization/resources/empathicVoice/resources/prompts/client/getPromptVersion.ts index 39e0d3f3..f9b942db 100644 --- a/src/serialization/resources/empathicVoice/resources/prompts/client/getPromptVersion.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/getPromptVersion.ts @@ -7,8 +7,8 @@ import { ReturnPrompt } from "../../../types/ReturnPrompt.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.prompts.getPromptVersion.Response.Raw, - Hume.empathicVoice.ReturnPrompt | undefined -> = ReturnPrompt.optional(); + Hume.empathicVoice.ReturnPrompt | null +> = ReturnPrompt.nullable(); export declare namespace Response { export type Raw = ReturnPrompt.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts index 3708246a..845235f7 100644 --- a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.ts @@ -10,13 +10,16 @@ export const PostedPrompt: core.serialization.Schema< > = core.serialization.object({ name: core.serialization.string(), text: core.serialization.string(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), }); export declare namespace PostedPrompt { export interface Raw { name: string; text: string; - version_description?: string | null; + version_description?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts index 
db4e7e26..daa31d3d 100644 --- a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptName.ts @@ -6,7 +6,7 @@ import type * as serializers from "../../../../../../index.js"; export const PostedPromptName: core.serialization.Schema< serializers.empathicVoice.PostedPromptName.Raw, - Hume.empathicVoice.PostedPromptName + Omit > = core.serialization.object({ name: core.serialization.string(), }); diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts index a6bac856..ef73d2eb 100644 --- a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.ts @@ -6,15 +6,18 @@ import type * as serializers from "../../../../../../index.js"; export const PostedPromptVersion: core.serialization.Schema< serializers.empathicVoice.PostedPromptVersion.Raw, - Hume.empathicVoice.PostedPromptVersion + Omit > = core.serialization.object({ text: core.serialization.string(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), }); export declare namespace PostedPromptVersion { export interface Raw { text: string; - version_description?: string | null; + version_description?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts index c762358e..eada6f0c 100644 
--- a/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersionDescription.ts @@ -6,13 +6,16 @@ import type * as serializers from "../../../../../../index.js"; export const PostedPromptVersionDescription: core.serialization.Schema< serializers.empathicVoice.PostedPromptVersionDescription.Raw, - Hume.empathicVoice.PostedPromptVersionDescription + Omit > = core.serialization.object({ - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), }); export declare namespace PostedPromptVersionDescription { export interface Raw { - version_description?: string | null; + version_description?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/prompts/client/updatePromptDescription.ts b/src/serialization/resources/empathicVoice/resources/prompts/client/updatePromptDescription.ts index 6956bf2d..cba4c556 100644 --- a/src/serialization/resources/empathicVoice/resources/prompts/client/updatePromptDescription.ts +++ b/src/serialization/resources/empathicVoice/resources/prompts/client/updatePromptDescription.ts @@ -7,8 +7,8 @@ import { ReturnPrompt } from "../../../types/ReturnPrompt.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.prompts.updatePromptDescription.Response.Raw, - Hume.empathicVoice.ReturnPrompt | undefined -> = ReturnPrompt.optional(); + Hume.empathicVoice.ReturnPrompt | null +> = ReturnPrompt.nullable(); export declare namespace Response { export type Raw = ReturnPrompt.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/createTool.ts 
b/src/serialization/resources/empathicVoice/resources/tools/client/createTool.ts index 88f7362e..9f8706ff 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/createTool.ts +++ b/src/serialization/resources/empathicVoice/resources/tools/client/createTool.ts @@ -7,8 +7,8 @@ import { ReturnUserDefinedTool } from "../../../types/ReturnUserDefinedTool.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.tools.createTool.Response.Raw, - Hume.empathicVoice.ReturnUserDefinedTool | undefined -> = ReturnUserDefinedTool.optional(); + Hume.empathicVoice.ReturnUserDefinedTool | null +> = ReturnUserDefinedTool.nullable(); export declare namespace Response { export type Raw = ReturnUserDefinedTool.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/createToolVersion.ts b/src/serialization/resources/empathicVoice/resources/tools/client/createToolVersion.ts index 1b15071d..9f52a9ca 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/createToolVersion.ts +++ b/src/serialization/resources/empathicVoice/resources/tools/client/createToolVersion.ts @@ -7,8 +7,8 @@ import { ReturnUserDefinedTool } from "../../../types/ReturnUserDefinedTool.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.tools.createToolVersion.Response.Raw, - Hume.empathicVoice.ReturnUserDefinedTool | undefined -> = ReturnUserDefinedTool.optional(); + Hume.empathicVoice.ReturnUserDefinedTool | null +> = ReturnUserDefinedTool.nullable(); export declare namespace Response { export type Raw = ReturnUserDefinedTool.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/getToolVersion.ts b/src/serialization/resources/empathicVoice/resources/tools/client/getToolVersion.ts index 420fb1b8..a41ec3d4 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/getToolVersion.ts +++ 
b/src/serialization/resources/empathicVoice/resources/tools/client/getToolVersion.ts @@ -7,8 +7,8 @@ import { ReturnUserDefinedTool } from "../../../types/ReturnUserDefinedTool.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.tools.getToolVersion.Response.Raw, - Hume.empathicVoice.ReturnUserDefinedTool | undefined -> = ReturnUserDefinedTool.optional(); + Hume.empathicVoice.ReturnUserDefinedTool | null +> = ReturnUserDefinedTool.nullable(); export declare namespace Response { export type Raw = ReturnUserDefinedTool.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts index 2725c4b4..af3ce0ee 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts +++ b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.ts @@ -8,19 +8,22 @@ export const PostedUserDefinedTool: core.serialization.Schema< serializers.empathicVoice.PostedUserDefinedTool.Raw, Hume.empathicVoice.PostedUserDefinedTool > = core.serialization.object({ - description: core.serialization.string().optional(), - fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optional()), + description: core.serialization.string().optionalNullable(), + fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optionalNullable()), name: core.serialization.string(), parameters: core.serialization.string(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), }); export declare namespace PostedUserDefinedTool { export interface Raw { - description?: string 
| null; - fallback_content?: string | null; + description?: (string | null | undefined) | null; + fallback_content?: (string | null | undefined) | null; name: string; parameters: string; - version_description?: string | null; + version_description?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts index fdc14c56..a343b93d 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts +++ b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolName.ts @@ -6,7 +6,7 @@ import type * as serializers from "../../../../../../index.js"; export const PostedUserDefinedToolName: core.serialization.Schema< serializers.empathicVoice.PostedUserDefinedToolName.Raw, - Hume.empathicVoice.PostedUserDefinedToolName + Omit > = core.serialization.object({ name: core.serialization.string(), }); diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts index f339ef22..da053fc8 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts +++ b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.ts @@ -6,19 +6,22 @@ import type * as serializers from "../../../../../../index.js"; export const PostedUserDefinedToolVersion: core.serialization.Schema< serializers.empathicVoice.PostedUserDefinedToolVersion.Raw, - Hume.empathicVoice.PostedUserDefinedToolVersion + Omit > = core.serialization.object({ - description: core.serialization.string().optional(), - fallbackContent: core.serialization.property("fallback_content", 
core.serialization.string().optional()), + description: core.serialization.string().optionalNullable(), + fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optionalNullable()), parameters: core.serialization.string(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), }); export declare namespace PostedUserDefinedToolVersion { export interface Raw { - description?: string | null; - fallback_content?: string | null; + description?: (string | null | undefined) | null; + fallback_content?: (string | null | undefined) | null; parameters: string; - version_description?: string | null; + version_description?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts index 2cb02766..711974fb 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts +++ b/src/serialization/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersionDescription.ts @@ -6,13 +6,16 @@ import type * as serializers from "../../../../../../index.js"; export const PostedUserDefinedToolVersionDescription: core.serialization.Schema< serializers.empathicVoice.PostedUserDefinedToolVersionDescription.Raw, - Hume.empathicVoice.PostedUserDefinedToolVersionDescription + Omit > = core.serialization.object({ - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), }); 
export declare namespace PostedUserDefinedToolVersionDescription { export interface Raw { - version_description?: string | null; + version_description?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/resources/tools/client/updateToolDescription.ts b/src/serialization/resources/empathicVoice/resources/tools/client/updateToolDescription.ts index 08a91718..871109a2 100644 --- a/src/serialization/resources/empathicVoice/resources/tools/client/updateToolDescription.ts +++ b/src/serialization/resources/empathicVoice/resources/tools/client/updateToolDescription.ts @@ -7,8 +7,8 @@ import { ReturnUserDefinedTool } from "../../../types/ReturnUserDefinedTool.js"; export const Response: core.serialization.Schema< serializers.empathicVoice.tools.updateToolDescription.Response.Raw, - Hume.empathicVoice.ReturnUserDefinedTool | undefined -> = ReturnUserDefinedTool.optional(); + Hume.empathicVoice.ReturnUserDefinedTool | null +> = ReturnUserDefinedTool.nullable(); export declare namespace Response { export type Raw = ReturnUserDefinedTool.Raw | null | undefined; diff --git a/src/serialization/resources/empathicVoice/types/AssistantEnd.ts b/src/serialization/resources/empathicVoice/types/AssistantEnd.ts index f485a842..78c3e293 100644 --- a/src/serialization/resources/empathicVoice/types/AssistantEnd.ts +++ b/src/serialization/resources/empathicVoice/types/AssistantEnd.ts @@ -8,13 +8,13 @@ export const AssistantEnd: core.serialization.ObjectSchema< serializers.empathicVoice.AssistantEnd.Raw, Hume.empathicVoice.AssistantEnd > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), type: core.serialization.stringLiteral("assistant_end"), }); export declare namespace AssistantEnd { export interface Raw { - custom_session_id?: string | null; + 
custom_session_id?: (string | null | undefined) | null; type: "assistant_end"; } } diff --git a/src/serialization/resources/empathicVoice/types/AssistantInput.ts b/src/serialization/resources/empathicVoice/types/AssistantInput.ts index fa255cce..386825ac 100644 --- a/src/serialization/resources/empathicVoice/types/AssistantInput.ts +++ b/src/serialization/resources/empathicVoice/types/AssistantInput.ts @@ -3,20 +3,21 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { AssistantInputType } from "./AssistantInputType.js"; export const AssistantInput: core.serialization.ObjectSchema< serializers.empathicVoice.AssistantInput.Raw, Hume.empathicVoice.AssistantInput > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), text: core.serialization.string(), - type: core.serialization.stringLiteral("assistant_input"), + type: AssistantInputType, }); export declare namespace AssistantInput { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; text: string; - type: "assistant_input"; + type: AssistantInputType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/AssistantInputType.ts b/src/serialization/resources/empathicVoice/types/AssistantInputType.ts new file mode 100644 index 00000000..ace4758d --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/AssistantInputType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const AssistantInputType: core.serialization.Schema< + serializers.empathicVoice.AssistantInputType.Raw, + Hume.empathicVoice.AssistantInputType +> = core.serialization.enum_(["assistant_input"]); + +export declare namespace AssistantInputType { + export type Raw = "assistant_input"; +} diff --git a/src/serialization/resources/empathicVoice/types/AssistantMessage.ts b/src/serialization/resources/empathicVoice/types/AssistantMessage.ts index d701d6fb..1c460960 100644 --- a/src/serialization/resources/empathicVoice/types/AssistantMessage.ts +++ b/src/serialization/resources/empathicVoice/types/AssistantMessage.ts @@ -10,11 +10,10 @@ export const AssistantMessage: core.serialization.ObjectSchema< serializers.empathicVoice.AssistantMessage.Raw, Hume.empathicVoice.AssistantMessage > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), fromText: core.serialization.property("from_text", core.serialization.boolean()), id: core.serialization.string().optional(), - isQuickResponse: core.serialization.property("is_quick_response", core.serialization.boolean()), - language: core.serialization.string().optional(), + language: core.serialization.string().optionalNullable(), message: ChatMessage, models: Inference, type: core.serialization.stringLiteral("assistant_message"), @@ -22,11 +21,10 @@ export const AssistantMessage: core.serialization.ObjectSchema< export declare namespace AssistantMessage { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; from_text: boolean; id?: string | null; - is_quick_response: boolean; - language?: string | 
null; + language?: (string | null | undefined) | null; message: ChatMessage.Raw; models: Inference.Raw; type: "assistant_message"; diff --git a/src/serialization/resources/empathicVoice/types/AssistantProsody.ts b/src/serialization/resources/empathicVoice/types/AssistantProsody.ts index c624b1b5..e346d58f 100644 --- a/src/serialization/resources/empathicVoice/types/AssistantProsody.ts +++ b/src/serialization/resources/empathicVoice/types/AssistantProsody.ts @@ -3,23 +3,24 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { AssistantProsodyType } from "./AssistantProsodyType.js"; import { Inference } from "./Inference.js"; export const AssistantProsody: core.serialization.ObjectSchema< serializers.empathicVoice.AssistantProsody.Raw, Hume.empathicVoice.AssistantProsody > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), id: core.serialization.string().optional(), models: Inference, - type: core.serialization.stringLiteral("assistant_prosody"), + type: AssistantProsodyType, }); export declare namespace AssistantProsody { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; id?: string | null; models: Inference.Raw; - type: "assistant_prosody"; + type: AssistantProsodyType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/AssistantProsodyType.ts b/src/serialization/resources/empathicVoice/types/AssistantProsodyType.ts new file mode 100644 index 00000000..c68c6db9 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/AssistantProsodyType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const AssistantProsodyType: core.serialization.Schema< + serializers.empathicVoice.AssistantProsodyType.Raw, + Hume.empathicVoice.AssistantProsodyType +> = core.serialization.enum_(["assistant_prosody"]); + +export declare namespace AssistantProsodyType { + export type Raw = "assistant_prosody"; +} diff --git a/src/serialization/resources/empathicVoice/types/AudioConfiguration.ts b/src/serialization/resources/empathicVoice/types/AudioConfiguration.ts index 7047e516..9701c62c 100644 --- a/src/serialization/resources/empathicVoice/types/AudioConfiguration.ts +++ b/src/serialization/resources/empathicVoice/types/AudioConfiguration.ts @@ -10,7 +10,7 @@ export const AudioConfiguration: core.serialization.ObjectSchema< Hume.empathicVoice.AudioConfiguration > = core.serialization.object({ channels: core.serialization.number(), - codec: core.serialization.string().optional(), + codec: core.serialization.string().optionalNullable(), encoding: Encoding, sampleRate: core.serialization.property("sample_rate", core.serialization.number()), }); @@ -18,7 +18,7 @@ export const AudioConfiguration: core.serialization.ObjectSchema< export declare namespace AudioConfiguration { export interface Raw { channels: number; - codec?: string | null; + codec?: (string | null | undefined) | null; encoding: Encoding.Raw; sample_rate: number; } diff --git a/src/serialization/resources/empathicVoice/types/AudioInput.ts b/src/serialization/resources/empathicVoice/types/AudioInput.ts index 9bf6c17e..0c667d8b 100644 --- a/src/serialization/resources/empathicVoice/types/AudioInput.ts +++ b/src/serialization/resources/empathicVoice/types/AudioInput.ts @@ -8,14 +8,14 @@ export const AudioInput: core.serialization.ObjectSchema< serializers.empathicVoice.AudioInput.Raw, Hume.empathicVoice.AudioInput > = core.serialization.object({ 
- customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), data: core.serialization.string(), type: core.serialization.stringLiteral("audio_input"), }); export declare namespace AudioInput { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; data: string; type: "audio_input"; } diff --git a/src/serialization/resources/empathicVoice/types/AudioOutput.ts b/src/serialization/resources/empathicVoice/types/AudioOutput.ts index daf5cbd6..974326ea 100644 --- a/src/serialization/resources/empathicVoice/types/AudioOutput.ts +++ b/src/serialization/resources/empathicVoice/types/AudioOutput.ts @@ -8,7 +8,7 @@ export const AudioOutput: core.serialization.ObjectSchema< serializers.empathicVoice.AudioOutput.Raw, Hume.empathicVoice.AudioOutput > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), data: core.serialization.string(), id: core.serialization.string(), index: core.serialization.number(), @@ -17,7 +17,7 @@ export const AudioOutput: core.serialization.ObjectSchema< export declare namespace AudioOutput { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; data: string; id: string; index: number; diff --git a/src/serialization/resources/empathicVoice/types/BuiltinToolConfig.ts b/src/serialization/resources/empathicVoice/types/BuiltinToolConfig.ts index 8587c3b6..0a37e21b 100644 --- a/src/serialization/resources/empathicVoice/types/BuiltinToolConfig.ts +++ b/src/serialization/resources/empathicVoice/types/BuiltinToolConfig.ts @@ -9,13 +9,13 @@ export const BuiltinToolConfig: 
core.serialization.ObjectSchema< serializers.empathicVoice.BuiltinToolConfig.Raw, Hume.empathicVoice.BuiltinToolConfig > = core.serialization.object({ - fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optional()), + fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optionalNullable()), name: BuiltInTool, }); export declare namespace BuiltinToolConfig { export interface Raw { - fallback_content?: string | null; + fallback_content?: (string | null | undefined) | null; name: BuiltInTool.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/ChatMessage.ts b/src/serialization/resources/empathicVoice/types/ChatMessage.ts index 15c2927d..1d0ae1a8 100644 --- a/src/serialization/resources/empathicVoice/types/ChatMessage.ts +++ b/src/serialization/resources/empathicVoice/types/ChatMessage.ts @@ -11,17 +11,17 @@ export const ChatMessage: core.serialization.ObjectSchema< serializers.empathicVoice.ChatMessage.Raw, Hume.empathicVoice.ChatMessage > = core.serialization.object({ - content: core.serialization.string().optional(), + content: core.serialization.string().optionalNullable(), role: Role, - toolCall: core.serialization.property("tool_call", ToolCallMessage.optional()), - toolResult: core.serialization.property("tool_result", ChatMessageToolResult.optional()), + toolCall: core.serialization.property("tool_call", ToolCallMessage.optionalNullable()), + toolResult: core.serialization.property("tool_result", ChatMessageToolResult.optionalNullable()), }); export declare namespace ChatMessage { export interface Raw { - content?: string | null; + content?: (string | null | undefined) | null; role: Role.Raw; - tool_call?: ToolCallMessage.Raw | null; - tool_result?: ChatMessageToolResult.Raw | null; + tool_call?: (ToolCallMessage.Raw | null | undefined) | null; + tool_result?: (ChatMessageToolResult.Raw | null | undefined) | null; } } diff --git 
a/src/serialization/resources/empathicVoice/types/ChatMetadata.ts b/src/serialization/resources/empathicVoice/types/ChatMetadata.ts index 8eeb6c28..7b18310b 100644 --- a/src/serialization/resources/empathicVoice/types/ChatMetadata.ts +++ b/src/serialization/resources/empathicVoice/types/ChatMetadata.ts @@ -3,6 +3,7 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { ChatMetadataType } from "./ChatMetadataType.js"; export const ChatMetadata: core.serialization.ObjectSchema< serializers.empathicVoice.ChatMetadata.Raw, @@ -10,17 +11,17 @@ export const ChatMetadata: core.serialization.ObjectSchema< > = core.serialization.object({ chatGroupId: core.serialization.property("chat_group_id", core.serialization.string()), chatId: core.serialization.property("chat_id", core.serialization.string()), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), - requestId: core.serialization.property("request_id", core.serialization.string().optional()), - type: core.serialization.stringLiteral("chat_metadata"), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), + requestId: core.serialization.property("request_id", core.serialization.string().nullable()), + type: ChatMetadataType, }); export declare namespace ChatMetadata { export interface Raw { chat_group_id: string; chat_id: string; - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; request_id?: string | null; - type: "chat_metadata"; + type: ChatMetadataType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/ChatMetadataType.ts b/src/serialization/resources/empathicVoice/types/ChatMetadataType.ts new file mode 100644 index 00000000..0f1fe42a --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ChatMetadataType.ts 
@@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. + +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const ChatMetadataType: core.serialization.Schema< + serializers.empathicVoice.ChatMetadataType.Raw, + Hume.empathicVoice.ChatMetadataType +> = core.serialization.enum_(["chat_metadata"]); + +export declare namespace ChatMetadataType { + export type Raw = "chat_metadata"; +} diff --git a/src/serialization/resources/empathicVoice/types/ConnectSessionSettings.ts b/src/serialization/resources/empathicVoice/types/ConnectSessionSettings.ts index 6e1d8f66..9882be0c 100644 --- a/src/serialization/resources/empathicVoice/types/ConnectSessionSettings.ts +++ b/src/serialization/resources/empathicVoice/types/ConnectSessionSettings.ts @@ -17,8 +17,8 @@ export const ConnectSessionSettings: core.serialization.ObjectSchema< eventLimit: core.serialization.property("event_limit", core.serialization.number().optional()), languageModelApiKey: core.serialization.property("language_model_api_key", core.serialization.string().optional()), systemPrompt: core.serialization.property("system_prompt", core.serialization.string().optional()), - voiceId: core.serialization.property("voice_id", core.serialization.string().optional()), variables: core.serialization.record(core.serialization.string(), ConnectSessionSettingsVariablesValue).optional(), + voiceId: core.serialization.property("voice_id", core.serialization.string().optional()), }); export declare namespace ConnectSessionSettings { @@ -29,7 +29,7 @@ export declare namespace ConnectSessionSettings { event_limit?: number | null; language_model_api_key?: string | null; system_prompt?: string | null; - voice_id?: string | null; variables?: Record | null; + voice_id?: string | null; } } diff --git a/src/serialization/resources/empathicVoice/types/Encoding.ts 
b/src/serialization/resources/empathicVoice/types/Encoding.ts index 56ee9a1e..f9aa1871 100644 --- a/src/serialization/resources/empathicVoice/types/Encoding.ts +++ b/src/serialization/resources/empathicVoice/types/Encoding.ts @@ -5,7 +5,7 @@ import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; export const Encoding: core.serialization.Schema = - core.serialization.stringLiteral("linear16"); + core.serialization.enum_(["linear16"]); export declare namespace Encoding { export type Raw = "linear16"; diff --git a/src/serialization/resources/empathicVoice/types/ErrorLevel.ts b/src/serialization/resources/empathicVoice/types/ErrorLevel.ts index bab1ed85..8ab0358c 100644 --- a/src/serialization/resources/empathicVoice/types/ErrorLevel.ts +++ b/src/serialization/resources/empathicVoice/types/ErrorLevel.ts @@ -7,7 +7,7 @@ import type * as serializers from "../../../index.js"; export const ErrorLevel: core.serialization.Schema< serializers.empathicVoice.ErrorLevel.Raw, Hume.empathicVoice.ErrorLevel -> = core.serialization.stringLiteral("warn"); +> = core.serialization.enum_(["warn"]); export declare namespace ErrorLevel { export type Raw = "warn"; diff --git a/src/serialization/resources/empathicVoice/types/ErrorType.ts b/src/serialization/resources/empathicVoice/types/ErrorType.ts new file mode 100644 index 00000000..b21e7653 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ErrorType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const ErrorType: core.serialization.Schema< + serializers.empathicVoice.ErrorType.Raw, + Hume.empathicVoice.ErrorType +> = core.serialization.enum_(["error"]); + +export declare namespace ErrorType { + export type Raw = "error"; +} diff --git a/src/serialization/resources/empathicVoice/types/Inference.ts b/src/serialization/resources/empathicVoice/types/Inference.ts index 4b610614..0d004fae 100644 --- a/src/serialization/resources/empathicVoice/types/Inference.ts +++ b/src/serialization/resources/empathicVoice/types/Inference.ts @@ -9,7 +9,7 @@ export const Inference: core.serialization.ObjectSchema< serializers.empathicVoice.Inference.Raw, Hume.empathicVoice.Inference > = core.serialization.object({ - prosody: ProsodyInference.optional(), + prosody: ProsodyInference.nullable(), }); export declare namespace Inference { diff --git a/src/serialization/resources/empathicVoice/types/JsonMessage.ts b/src/serialization/resources/empathicVoice/types/JsonMessage.ts index 2215f1cb..6572b253 100644 --- a/src/serialization/resources/empathicVoice/types/JsonMessage.ts +++ b/src/serialization/resources/empathicVoice/types/JsonMessage.ts @@ -7,7 +7,6 @@ import { AssistantEnd } from "./AssistantEnd.js"; import { AssistantMessage } from "./AssistantMessage.js"; import { AssistantProsody } from "./AssistantProsody.js"; import { ChatMetadata } from "./ChatMetadata.js"; -import { SessionSettings } from "./SessionSettings.js"; import { ToolCallMessage } from "./ToolCallMessage.js"; import { ToolErrorMessage } from "./ToolErrorMessage.js"; import { ToolResponseMessage } from "./ToolResponseMessage.js"; @@ -29,7 +28,6 @@ export const JsonMessage: core.serialization.Schema< ToolCallMessage, ToolResponseMessage, ToolErrorMessage, - SessionSettings, ]); export declare namespace JsonMessage { @@ -43,6 +41,5 @@ export 
declare namespace JsonMessage { | UserMessage.Raw | ToolCallMessage.Raw | ToolResponseMessage.Raw - | ToolErrorMessage.Raw - | SessionSettings.Raw; + | ToolErrorMessage.Raw; } diff --git a/src/serialization/resources/empathicVoice/types/PauseAssistantMessage.ts b/src/serialization/resources/empathicVoice/types/PauseAssistantMessage.ts index 7c54151e..e02d1e3b 100644 --- a/src/serialization/resources/empathicVoice/types/PauseAssistantMessage.ts +++ b/src/serialization/resources/empathicVoice/types/PauseAssistantMessage.ts @@ -3,18 +3,19 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { PauseAssistantMessageType } from "./PauseAssistantMessageType.js"; export const PauseAssistantMessage: core.serialization.ObjectSchema< serializers.empathicVoice.PauseAssistantMessage.Raw, Hume.empathicVoice.PauseAssistantMessage > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), - type: core.serialization.stringLiteral("pause_assistant_message"), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), + type: PauseAssistantMessageType, }); export declare namespace PauseAssistantMessage { export interface Raw { - custom_session_id?: string | null; - type: "pause_assistant_message"; + custom_session_id?: (string | null | undefined) | null; + type: PauseAssistantMessageType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/PauseAssistantMessageType.ts b/src/serialization/resources/empathicVoice/types/PauseAssistantMessageType.ts new file mode 100644 index 00000000..e9f6948e --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/PauseAssistantMessageType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const PauseAssistantMessageType: core.serialization.Schema< + serializers.empathicVoice.PauseAssistantMessageType.Raw, + Hume.empathicVoice.PauseAssistantMessageType +> = core.serialization.enum_(["pause_assistant_message"]); + +export declare namespace PauseAssistantMessageType { + export type Raw = "pause_assistant_message"; +} diff --git a/src/serialization/resources/empathicVoice/types/PostedBuiltinTool.ts b/src/serialization/resources/empathicVoice/types/PostedBuiltinTool.ts index 6697b373..dce787f7 100644 --- a/src/serialization/resources/empathicVoice/types/PostedBuiltinTool.ts +++ b/src/serialization/resources/empathicVoice/types/PostedBuiltinTool.ts @@ -9,13 +9,13 @@ export const PostedBuiltinTool: core.serialization.ObjectSchema< serializers.empathicVoice.PostedBuiltinTool.Raw, Hume.empathicVoice.PostedBuiltinTool > = core.serialization.object({ - fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optional()), + fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optionalNullable()), name: PostedBuiltinToolName, }); export declare namespace PostedBuiltinTool { export interface Raw { - fallback_content?: string | null; + fallback_content?: (string | null | undefined) | null; name: PostedBuiltinToolName.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedConfigPromptSpec.ts b/src/serialization/resources/empathicVoice/types/PostedConfigPromptSpec.ts index 18518eef..b79c35e6 100644 --- a/src/serialization/resources/empathicVoice/types/PostedConfigPromptSpec.ts +++ b/src/serialization/resources/empathicVoice/types/PostedConfigPromptSpec.ts @@ -8,15 +8,15 @@ export const PostedConfigPromptSpec: core.serialization.ObjectSchema< serializers.empathicVoice.PostedConfigPromptSpec.Raw, 
Hume.empathicVoice.PostedConfigPromptSpec > = core.serialization.object({ - id: core.serialization.string().optional(), - text: core.serialization.string().optional(), - version: core.serialization.number().optional(), + id: core.serialization.string().optionalNullable(), + text: core.serialization.string().optionalNullable(), + version: core.serialization.number().optionalNullable(), }); export declare namespace PostedConfigPromptSpec { export interface Raw { - id?: string | null; - text?: string | null; - version?: number | null; + id?: (string | null | undefined) | null; + text?: (string | null | undefined) | null; + version?: (number | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedEllmModel.ts b/src/serialization/resources/empathicVoice/types/PostedEllmModel.ts index 26d2624b..4b6e9ead 100644 --- a/src/serialization/resources/empathicVoice/types/PostedEllmModel.ts +++ b/src/serialization/resources/empathicVoice/types/PostedEllmModel.ts @@ -8,11 +8,14 @@ export const PostedEllmModel: core.serialization.ObjectSchema< serializers.empathicVoice.PostedEllmModel.Raw, Hume.empathicVoice.PostedEllmModel > = core.serialization.object({ - allowShortResponses: core.serialization.property("allow_short_responses", core.serialization.boolean().optional()), + allowShortResponses: core.serialization.property( + "allow_short_responses", + core.serialization.boolean().optionalNullable(), + ), }); export declare namespace PostedEllmModel { export interface Raw { - allow_short_responses?: boolean | null; + allow_short_responses?: (boolean | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedEventMessageSpec.ts b/src/serialization/resources/empathicVoice/types/PostedEventMessageSpec.ts index 13809308..6d0c1ed9 100644 --- a/src/serialization/resources/empathicVoice/types/PostedEventMessageSpec.ts +++ b/src/serialization/resources/empathicVoice/types/PostedEventMessageSpec.ts @@ -9,12 
+9,12 @@ export const PostedEventMessageSpec: core.serialization.ObjectSchema< Hume.empathicVoice.PostedEventMessageSpec > = core.serialization.object({ enabled: core.serialization.boolean(), - text: core.serialization.string().optional(), + text: core.serialization.string().optionalNullable(), }); export declare namespace PostedEventMessageSpec { export interface Raw { enabled: boolean; - text?: string | null; + text?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedEventMessageSpecs.ts b/src/serialization/resources/empathicVoice/types/PostedEventMessageSpecs.ts index f25d6e4b..89fbf982 100644 --- a/src/serialization/resources/empathicVoice/types/PostedEventMessageSpecs.ts +++ b/src/serialization/resources/empathicVoice/types/PostedEventMessageSpecs.ts @@ -9,15 +9,21 @@ export const PostedEventMessageSpecs: core.serialization.ObjectSchema< serializers.empathicVoice.PostedEventMessageSpecs.Raw, Hume.empathicVoice.PostedEventMessageSpecs > = core.serialization.object({ - onInactivityTimeout: core.serialization.property("on_inactivity_timeout", PostedEventMessageSpec.optional()), - onMaxDurationTimeout: core.serialization.property("on_max_duration_timeout", PostedEventMessageSpec.optional()), - onNewChat: core.serialization.property("on_new_chat", PostedEventMessageSpec.optional()), + onInactivityTimeout: core.serialization.property( + "on_inactivity_timeout", + PostedEventMessageSpec.optionalNullable(), + ), + onMaxDurationTimeout: core.serialization.property( + "on_max_duration_timeout", + PostedEventMessageSpec.optionalNullable(), + ), + onNewChat: core.serialization.property("on_new_chat", PostedEventMessageSpec.optionalNullable()), }); export declare namespace PostedEventMessageSpecs { export interface Raw { - on_inactivity_timeout?: PostedEventMessageSpec.Raw | null; - on_max_duration_timeout?: PostedEventMessageSpec.Raw | null; - on_new_chat?: PostedEventMessageSpec.Raw | null; + 
on_inactivity_timeout?: (PostedEventMessageSpec.Raw | null | undefined) | null; + on_max_duration_timeout?: (PostedEventMessageSpec.Raw | null | undefined) | null; + on_new_chat?: (PostedEventMessageSpec.Raw | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedLanguageModel.ts b/src/serialization/resources/empathicVoice/types/PostedLanguageModel.ts index 2865b185..050c95c9 100644 --- a/src/serialization/resources/empathicVoice/types/PostedLanguageModel.ts +++ b/src/serialization/resources/empathicVoice/types/PostedLanguageModel.ts @@ -12,13 +12,13 @@ export const PostedLanguageModel: core.serialization.ObjectSchema< > = core.serialization.object({ modelProvider: core.serialization.property("model_provider", ModelProviderEnum.optional()), modelResource: core.serialization.property("model_resource", LanguageModelType.optional()), - temperature: core.serialization.number().optional(), + temperature: core.serialization.number().optionalNullable(), }); export declare namespace PostedLanguageModel { export interface Raw { model_provider?: ModelProviderEnum.Raw | null; model_resource?: LanguageModelType.Raw | null; - temperature?: number | null; + temperature?: (number | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedNudgeSpec.ts b/src/serialization/resources/empathicVoice/types/PostedNudgeSpec.ts index 607dbc57..de4cab81 100644 --- a/src/serialization/resources/empathicVoice/types/PostedNudgeSpec.ts +++ b/src/serialization/resources/empathicVoice/types/PostedNudgeSpec.ts @@ -8,13 +8,13 @@ export const PostedNudgeSpec: core.serialization.ObjectSchema< serializers.empathicVoice.PostedNudgeSpec.Raw, Hume.empathicVoice.PostedNudgeSpec > = core.serialization.object({ - enabled: core.serialization.boolean().optional(), - intervalSecs: core.serialization.property("interval_secs", core.serialization.number().optional()), + enabled: core.serialization.boolean().optionalNullable(), + 
intervalSecs: core.serialization.property("interval_secs", core.serialization.number().optionalNullable()), }); export declare namespace PostedNudgeSpec { export interface Raw { - enabled?: boolean | null; - interval_secs?: number | null; + enabled?: (boolean | null | undefined) | null; + interval_secs?: (number | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedTimeoutSpec.ts b/src/serialization/resources/empathicVoice/types/PostedTimeoutSpec.ts index 4513336f..2d94ed3d 100644 --- a/src/serialization/resources/empathicVoice/types/PostedTimeoutSpec.ts +++ b/src/serialization/resources/empathicVoice/types/PostedTimeoutSpec.ts @@ -8,13 +8,13 @@ export const PostedTimeoutSpec: core.serialization.ObjectSchema< serializers.empathicVoice.PostedTimeoutSpec.Raw, Hume.empathicVoice.PostedTimeoutSpec > = core.serialization.object({ - durationSecs: core.serialization.property("duration_secs", core.serialization.number().optional()), + durationSecs: core.serialization.property("duration_secs", core.serialization.number().optionalNullable()), enabled: core.serialization.boolean(), }); export declare namespace PostedTimeoutSpec { export interface Raw { - duration_secs?: number | null; + duration_secs?: (number | null | undefined) | null; enabled: boolean; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts b/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts index 1ec590ea..74a260fb 100644 --- a/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts +++ b/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.ts @@ -8,13 +8,13 @@ export const PostedTimeoutSpecsInactivity: core.serialization.ObjectSchema< serializers.empathicVoice.PostedTimeoutSpecsInactivity.Raw, Hume.empathicVoice.PostedTimeoutSpecsInactivity > = core.serialization.object({ - durationSecs: core.serialization.property("duration_secs", 
core.serialization.number().optional()), + durationSecs: core.serialization.property("duration_secs", core.serialization.number().optionalNullable()), enabled: core.serialization.boolean(), }); export declare namespace PostedTimeoutSpecsInactivity { export interface Raw { - duration_secs?: number | null; + duration_secs?: (number | null | undefined) | null; enabled: boolean; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts b/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts index c5134cbd..504bc48c 100644 --- a/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts +++ b/src/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.ts @@ -8,13 +8,13 @@ export const PostedTimeoutSpecsMaxDuration: core.serialization.ObjectSchema< serializers.empathicVoice.PostedTimeoutSpecsMaxDuration.Raw, Hume.empathicVoice.PostedTimeoutSpecsMaxDuration > = core.serialization.object({ - durationSecs: core.serialization.property("duration_secs", core.serialization.number().optional()), + durationSecs: core.serialization.property("duration_secs", core.serialization.number().optionalNullable()), enabled: core.serialization.boolean(), }); export declare namespace PostedTimeoutSpecsMaxDuration { export interface Raw { - duration_secs?: number | null; + duration_secs?: (number | null | undefined) | null; enabled: boolean; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts b/src/serialization/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts index e9322d9a..663530fa 100644 --- a/src/serialization/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts +++ b/src/serialization/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts @@ -9,12 +9,12 @@ export const PostedUserDefinedToolSpec: core.serialization.ObjectSchema< Hume.empathicVoice.PostedUserDefinedToolSpec > = core.serialization.object({ id: 
core.serialization.string(), - version: core.serialization.number().optional(), + version: core.serialization.number().optionalNullable(), }); export declare namespace PostedUserDefinedToolSpec { export interface Raw { id: string; - version?: number | null; + version?: (number | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ResumeAssistantMessage.ts b/src/serialization/resources/empathicVoice/types/ResumeAssistantMessage.ts index 79853d3a..2d7274b6 100644 --- a/src/serialization/resources/empathicVoice/types/ResumeAssistantMessage.ts +++ b/src/serialization/resources/empathicVoice/types/ResumeAssistantMessage.ts @@ -3,18 +3,19 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { ResumeAssistantMessageType } from "./ResumeAssistantMessageType.js"; export const ResumeAssistantMessage: core.serialization.ObjectSchema< serializers.empathicVoice.ResumeAssistantMessage.Raw, Hume.empathicVoice.ResumeAssistantMessage > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), - type: core.serialization.stringLiteral("resume_assistant_message"), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), + type: ResumeAssistantMessageType, }); export declare namespace ResumeAssistantMessage { export interface Raw { - custom_session_id?: string | null; - type: "resume_assistant_message"; + custom_session_id?: (string | null | undefined) | null; + type: ResumeAssistantMessageType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/ResumeAssistantMessageType.ts b/src/serialization/resources/empathicVoice/types/ResumeAssistantMessageType.ts new file mode 100644 index 00000000..4073286e --- /dev/null +++ 
b/src/serialization/resources/empathicVoice/types/ResumeAssistantMessageType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. + +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const ResumeAssistantMessageType: core.serialization.Schema< + serializers.empathicVoice.ResumeAssistantMessageType.Raw, + Hume.empathicVoice.ResumeAssistantMessageType +> = core.serialization.enum_(["resume_assistant_message"]); + +export declare namespace ResumeAssistantMessageType { + export type Raw = "resume_assistant_message"; +} diff --git a/src/serialization/resources/empathicVoice/types/ReturnBuiltinTool.ts b/src/serialization/resources/empathicVoice/types/ReturnBuiltinTool.ts index 8d141fe6..7dd02ac1 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnBuiltinTool.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnBuiltinTool.ts @@ -9,14 +9,14 @@ export const ReturnBuiltinTool: core.serialization.ObjectSchema< serializers.empathicVoice.ReturnBuiltinTool.Raw, Hume.empathicVoice.ReturnBuiltinTool > = core.serialization.object({ - fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optional()), + fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optionalNullable()), name: core.serialization.string(), toolType: core.serialization.property("tool_type", ReturnBuiltinToolToolType), }); export declare namespace ReturnBuiltinTool { export interface Raw { - fallback_content?: string | null; + fallback_content?: (string | null | undefined) | null; name: string; tool_type: ReturnBuiltinToolToolType.Raw; } diff --git a/src/serialization/resources/empathicVoice/types/ReturnChat.ts b/src/serialization/resources/empathicVoice/types/ReturnChat.ts index 8a4e6efc..efb64edc 100644 --- 
a/src/serialization/resources/empathicVoice/types/ReturnChat.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnChat.ts @@ -11,11 +11,11 @@ export const ReturnChat: core.serialization.ObjectSchema< Hume.empathicVoice.ReturnChat > = core.serialization.object({ chatGroupId: core.serialization.property("chat_group_id", core.serialization.string()), - config: ReturnConfigSpec.optional(), - endTimestamp: core.serialization.property("end_timestamp", core.serialization.number().optional()), - eventCount: core.serialization.property("event_count", core.serialization.number().optional()), + config: ReturnConfigSpec.optionalNullable(), + endTimestamp: core.serialization.property("end_timestamp", core.serialization.number().optionalNullable()), + eventCount: core.serialization.property("event_count", core.serialization.number().optionalNullable()), id: core.serialization.string(), - metadata: core.serialization.string().optional(), + metadata: core.serialization.string().optionalNullable(), startTimestamp: core.serialization.property("start_timestamp", core.serialization.number()), status: ReturnChatStatus, }); @@ -23,11 +23,11 @@ export const ReturnChat: core.serialization.ObjectSchema< export declare namespace ReturnChat { export interface Raw { chat_group_id: string; - config?: ReturnConfigSpec.Raw | null; - end_timestamp?: number | null; - event_count?: number | null; + config?: (ReturnConfigSpec.Raw | null | undefined) | null; + end_timestamp?: (number | null | undefined) | null; + event_count?: (number | null | undefined) | null; id: string; - metadata?: string | null; + metadata?: (string | null | undefined) | null; start_timestamp: number; status: ReturnChatStatus.Raw; } diff --git a/src/serialization/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts b/src/serialization/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts index 6a8cb978..a06961a2 100644 --- 
a/src/serialization/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnChatAudioReconstruction.ts @@ -9,13 +9,13 @@ export const ReturnChatAudioReconstruction: core.serialization.ObjectSchema< serializers.empathicVoice.ReturnChatAudioReconstruction.Raw, Hume.empathicVoice.ReturnChatAudioReconstruction > = core.serialization.object({ - filename: core.serialization.string().optional(), + filename: core.serialization.string().optionalNullable(), id: core.serialization.string(), - modifiedAt: core.serialization.property("modified_at", core.serialization.number().optional()), - signedAudioUrl: core.serialization.property("signed_audio_url", core.serialization.string().optional()), + modifiedAt: core.serialization.property("modified_at", core.serialization.number().optionalNullable()), + signedAudioUrl: core.serialization.property("signed_audio_url", core.serialization.string().optionalNullable()), signedUrlExpirationTimestampMillis: core.serialization.property( "signed_url_expiration_timestamp_millis", - core.serialization.number().optional(), + core.serialization.number().optionalNullable(), ), status: ReturnChatAudioReconstructionStatus, userId: core.serialization.property("user_id", core.serialization.string()), @@ -23,11 +23,11 @@ export const ReturnChatAudioReconstruction: core.serialization.ObjectSchema< export declare namespace ReturnChatAudioReconstruction { export interface Raw { - filename?: string | null; + filename?: (string | null | undefined) | null; id: string; - modified_at?: number | null; - signed_audio_url?: string | null; - signed_url_expiration_timestamp_millis?: number | null; + modified_at?: (number | null | undefined) | null; + signed_audio_url?: (string | null | undefined) | null; + signed_url_expiration_timestamp_millis?: (number | null | undefined) | null; status: ReturnChatAudioReconstructionStatus.Raw; user_id: string; } diff --git 
a/src/serialization/resources/empathicVoice/types/ReturnChatEvent.ts b/src/serialization/resources/empathicVoice/types/ReturnChatEvent.ts index 713459a4..2571c13a 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnChatEvent.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnChatEvent.ts @@ -11,11 +11,11 @@ export const ReturnChatEvent: core.serialization.ObjectSchema< Hume.empathicVoice.ReturnChatEvent > = core.serialization.object({ chatId: core.serialization.property("chat_id", core.serialization.string()), - emotionFeatures: core.serialization.property("emotion_features", core.serialization.string().optional()), + emotionFeatures: core.serialization.property("emotion_features", core.serialization.string().optionalNullable()), id: core.serialization.string(), - messageText: core.serialization.property("message_text", core.serialization.string().optional()), - metadata: core.serialization.string().optional(), - relatedEventId: core.serialization.property("related_event_id", core.serialization.string().optional()), + messageText: core.serialization.property("message_text", core.serialization.string().optionalNullable()), + metadata: core.serialization.string().optionalNullable(), + relatedEventId: core.serialization.property("related_event_id", core.serialization.string().optionalNullable()), role: ReturnChatEventRole, timestamp: core.serialization.number(), type: ReturnChatEventType, @@ -24,11 +24,11 @@ export const ReturnChatEvent: core.serialization.ObjectSchema< export declare namespace ReturnChatEvent { export interface Raw { chat_id: string; - emotion_features?: string | null; + emotion_features?: (string | null | undefined) | null; id: string; - message_text?: string | null; - metadata?: string | null; - related_event_id?: string | null; + message_text?: (string | null | undefined) | null; + metadata?: (string | null | undefined) | null; + related_event_id?: (string | null | undefined) | null; role: ReturnChatEventRole.Raw; 
timestamp: number; type: ReturnChatEventType.Raw; diff --git a/src/serialization/resources/empathicVoice/types/ReturnChatGroup.ts b/src/serialization/resources/empathicVoice/types/ReturnChatGroup.ts index 94fc86a0..04023394 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnChatGroup.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnChatGroup.ts @@ -12,8 +12,11 @@ export const ReturnChatGroup: core.serialization.ObjectSchema< active: core.serialization.boolean().optional(), firstStartTimestamp: core.serialization.property("first_start_timestamp", core.serialization.number()), id: core.serialization.string(), - mostRecentChatId: core.serialization.property("most_recent_chat_id", core.serialization.string().optional()), - mostRecentConfig: core.serialization.property("most_recent_config", ReturnConfigSpec.optional()), + mostRecentChatId: core.serialization.property( + "most_recent_chat_id", + core.serialization.string().optionalNullable(), + ), + mostRecentConfig: core.serialization.property("most_recent_config", ReturnConfigSpec.optionalNullable()), mostRecentStartTimestamp: core.serialization.property("most_recent_start_timestamp", core.serialization.number()), numChats: core.serialization.property("num_chats", core.serialization.number()), }); @@ -23,8 +26,8 @@ export declare namespace ReturnChatGroup { active?: boolean | null; first_start_timestamp: number; id: string; - most_recent_chat_id?: string | null; - most_recent_config?: ReturnConfigSpec.Raw | null; + most_recent_chat_id?: (string | null | undefined) | null; + most_recent_config?: (ReturnConfigSpec.Raw | null | undefined) | null; most_recent_start_timestamp: number; num_chats: number; } diff --git a/src/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.ts b/src/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.ts index d4f7ca50..9b889045 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.ts +++ 
b/src/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.ts @@ -13,11 +13,11 @@ export const ReturnChatPagedEvents: core.serialization.ObjectSchema< Hume.empathicVoice.ReturnChatPagedEvents > = core.serialization.object({ chatGroupId: core.serialization.property("chat_group_id", core.serialization.string()), - config: ReturnConfigSpec.optional(), - endTimestamp: core.serialization.property("end_timestamp", core.serialization.number().optional()), + config: ReturnConfigSpec.optionalNullable(), + endTimestamp: core.serialization.property("end_timestamp", core.serialization.number().optionalNullable()), eventsPage: core.serialization.property("events_page", core.serialization.list(ReturnChatEvent)), id: core.serialization.string(), - metadata: core.serialization.string().optional(), + metadata: core.serialization.string().optionalNullable(), pageNumber: core.serialization.property("page_number", core.serialization.number()), pageSize: core.serialization.property("page_size", core.serialization.number()), paginationDirection: core.serialization.property("pagination_direction", ReturnChatPagedEventsPaginationDirection), @@ -29,11 +29,11 @@ export const ReturnChatPagedEvents: core.serialization.ObjectSchema< export declare namespace ReturnChatPagedEvents { export interface Raw { chat_group_id: string; - config?: ReturnConfigSpec.Raw | null; - end_timestamp?: number | null; + config?: (ReturnConfigSpec.Raw | null | undefined) | null; + end_timestamp?: (number | null | undefined) | null; events_page: ReturnChatEvent.Raw[]; id: string; - metadata?: string | null; + metadata?: (string | null | undefined) | null; page_number: number; page_size: number; pagination_direction: ReturnChatPagedEventsPaginationDirection.Raw; diff --git a/src/serialization/resources/empathicVoice/types/ReturnConfig.ts b/src/serialization/resources/empathicVoice/types/ReturnConfig.ts index 55d9ee1a..4d6bc55f 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnConfig.ts 
+++ b/src/serialization/resources/empathicVoice/types/ReturnConfig.ts @@ -20,44 +20,47 @@ export const ReturnConfig: core.serialization.ObjectSchema< > = core.serialization.object({ builtinTools: core.serialization.property( "builtin_tools", - core.serialization.list(ReturnBuiltinTool.optional()).optional(), + core.serialization.list(ReturnBuiltinTool.nullable()).optionalNullable(), ), createdOn: core.serialization.property("created_on", core.serialization.number().optional()), - ellmModel: core.serialization.property("ellm_model", ReturnEllmModel.optional()), - eventMessages: core.serialization.property("event_messages", ReturnEventMessageSpecs.optional()), + ellmModel: core.serialization.property("ellm_model", ReturnEllmModel.optionalNullable()), + eventMessages: core.serialization.property("event_messages", ReturnEventMessageSpecs.optionalNullable()), eviVersion: core.serialization.property("evi_version", core.serialization.string().optional()), id: core.serialization.string().optional(), - languageModel: core.serialization.property("language_model", ReturnLanguageModel.optional()), + languageModel: core.serialization.property("language_model", ReturnLanguageModel.optionalNullable()), modifiedOn: core.serialization.property("modified_on", core.serialization.number().optional()), name: core.serialization.string().optional(), - nudges: ReturnNudgeSpec.optional(), - prompt: ReturnPrompt.optional(), - timeouts: ReturnTimeoutSpecs.optional(), - tools: core.serialization.list(ReturnUserDefinedTool.optional()).optional(), + nudges: ReturnNudgeSpec.optionalNullable(), + prompt: ReturnPrompt.optionalNullable(), + timeouts: ReturnTimeoutSpecs.optionalNullable(), + tools: core.serialization.list(ReturnUserDefinedTool.nullable()).optionalNullable(), version: core.serialization.number().optional(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + 
"version_description", + core.serialization.string().optionalNullable(), + ), voice: ReturnVoice.optional(), - webhooks: core.serialization.list(ReturnWebhookSpec.optional()).optional(), + webhooks: core.serialization.list(ReturnWebhookSpec.nullable()).optionalNullable(), }); export declare namespace ReturnConfig { export interface Raw { - builtin_tools?: (ReturnBuiltinTool.Raw | null | undefined)[] | null; + builtin_tools?: ((ReturnBuiltinTool.Raw | null | undefined)[] | null | undefined) | null; created_on?: number | null; - ellm_model?: ReturnEllmModel.Raw | null; - event_messages?: ReturnEventMessageSpecs.Raw | null; + ellm_model?: (ReturnEllmModel.Raw | null | undefined) | null; + event_messages?: (ReturnEventMessageSpecs.Raw | null | undefined) | null; evi_version?: string | null; id?: string | null; - language_model?: ReturnLanguageModel.Raw | null; + language_model?: (ReturnLanguageModel.Raw | null | undefined) | null; modified_on?: number | null; name?: string | null; - nudges?: ReturnNudgeSpec.Raw | null; - prompt?: ReturnPrompt.Raw | null; - timeouts?: ReturnTimeoutSpecs.Raw | null; - tools?: (ReturnUserDefinedTool.Raw | null | undefined)[] | null; + nudges?: (ReturnNudgeSpec.Raw | null | undefined) | null; + prompt?: (ReturnPrompt.Raw | null | undefined) | null; + timeouts?: (ReturnTimeoutSpecs.Raw | null | undefined) | null; + tools?: ((ReturnUserDefinedTool.Raw | null | undefined)[] | null | undefined) | null; version?: number | null; - version_description?: string | null; + version_description?: (string | null | undefined) | null; voice?: ReturnVoice.Raw | null; - webhooks?: (ReturnWebhookSpec.Raw | null | undefined)[] | null; + webhooks?: ((ReturnWebhookSpec.Raw | null | undefined)[] | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnConfigSpec.ts b/src/serialization/resources/empathicVoice/types/ReturnConfigSpec.ts index e95e0801..f350610e 100644 --- 
a/src/serialization/resources/empathicVoice/types/ReturnConfigSpec.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnConfigSpec.ts @@ -9,12 +9,12 @@ export const ReturnConfigSpec: core.serialization.ObjectSchema< Hume.empathicVoice.ReturnConfigSpec > = core.serialization.object({ id: core.serialization.string(), - version: core.serialization.number().optional(), + version: core.serialization.number().optionalNullable(), }); export declare namespace ReturnConfigSpec { export interface Raw { id: string; - version?: number | null; + version?: (number | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpec.ts b/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpec.ts index 439abfb9..c2acc8be 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpec.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpec.ts @@ -9,12 +9,12 @@ export const ReturnEventMessageSpec: core.serialization.ObjectSchema< Hume.empathicVoice.ReturnEventMessageSpec > = core.serialization.object({ enabled: core.serialization.boolean(), - text: core.serialization.string().optional(), + text: core.serialization.string().optionalNullable(), }); export declare namespace ReturnEventMessageSpec { export interface Raw { enabled: boolean; - text?: string | null; + text?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpecs.ts b/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpecs.ts index ce38d667..31b6b237 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpecs.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnEventMessageSpecs.ts @@ -9,15 +9,21 @@ export const ReturnEventMessageSpecs: core.serialization.ObjectSchema< serializers.empathicVoice.ReturnEventMessageSpecs.Raw, Hume.empathicVoice.ReturnEventMessageSpecs > = 
core.serialization.object({ - onInactivityTimeout: core.serialization.property("on_inactivity_timeout", ReturnEventMessageSpec.optional()), - onMaxDurationTimeout: core.serialization.property("on_max_duration_timeout", ReturnEventMessageSpec.optional()), - onNewChat: core.serialization.property("on_new_chat", ReturnEventMessageSpec.optional()), + onInactivityTimeout: core.serialization.property( + "on_inactivity_timeout", + ReturnEventMessageSpec.optionalNullable(), + ), + onMaxDurationTimeout: core.serialization.property( + "on_max_duration_timeout", + ReturnEventMessageSpec.optionalNullable(), + ), + onNewChat: core.serialization.property("on_new_chat", ReturnEventMessageSpec.optionalNullable()), }); export declare namespace ReturnEventMessageSpecs { export interface Raw { - on_inactivity_timeout?: ReturnEventMessageSpec.Raw | null; - on_max_duration_timeout?: ReturnEventMessageSpec.Raw | null; - on_new_chat?: ReturnEventMessageSpec.Raw | null; + on_inactivity_timeout?: (ReturnEventMessageSpec.Raw | null | undefined) | null; + on_max_duration_timeout?: (ReturnEventMessageSpec.Raw | null | undefined) | null; + on_new_chat?: (ReturnEventMessageSpec.Raw | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnLanguageModel.ts b/src/serialization/resources/empathicVoice/types/ReturnLanguageModel.ts index 4d31ca73..b3302025 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnLanguageModel.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnLanguageModel.ts @@ -12,13 +12,13 @@ export const ReturnLanguageModel: core.serialization.ObjectSchema< > = core.serialization.object({ modelProvider: core.serialization.property("model_provider", ModelProviderEnum.optional()), modelResource: core.serialization.property("model_resource", LanguageModelType.optional()), - temperature: core.serialization.number().optional(), + temperature: core.serialization.number().optionalNullable(), }); export declare namespace 
ReturnLanguageModel { export interface Raw { model_provider?: ModelProviderEnum.Raw | null; model_resource?: LanguageModelType.Raw | null; - temperature?: number | null; + temperature?: (number | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnNudgeSpec.ts b/src/serialization/resources/empathicVoice/types/ReturnNudgeSpec.ts index 387668f6..b73b0096 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnNudgeSpec.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnNudgeSpec.ts @@ -9,12 +9,12 @@ export const ReturnNudgeSpec: core.serialization.ObjectSchema< Hume.empathicVoice.ReturnNudgeSpec > = core.serialization.object({ enabled: core.serialization.boolean(), - intervalSecs: core.serialization.property("interval_secs", core.serialization.number().optional()), + intervalSecs: core.serialization.property("interval_secs", core.serialization.number().optionalNullable()), }); export declare namespace ReturnNudgeSpec { export interface Raw { enabled: boolean; - interval_secs?: number | null; + interval_secs?: (number | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnPagedPrompts.ts b/src/serialization/resources/empathicVoice/types/ReturnPagedPrompts.ts index cc4b92d9..2ea52e95 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnPagedPrompts.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnPagedPrompts.ts @@ -11,7 +11,7 @@ export const ReturnPagedPrompts: core.serialization.ObjectSchema< > = core.serialization.object({ pageNumber: core.serialization.property("page_number", core.serialization.number()), pageSize: core.serialization.property("page_size", core.serialization.number()), - promptsPage: core.serialization.property("prompts_page", core.serialization.list(ReturnPrompt.optional())), + promptsPage: core.serialization.property("prompts_page", core.serialization.list(ReturnPrompt.nullable())), totalPages: 
core.serialization.property("total_pages", core.serialization.number()), }); diff --git a/src/serialization/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts b/src/serialization/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts index 7bc2f41b..5bb1723d 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnPagedUserDefinedTools.ts @@ -11,7 +11,7 @@ export const ReturnPagedUserDefinedTools: core.serialization.ObjectSchema< > = core.serialization.object({ pageNumber: core.serialization.property("page_number", core.serialization.number()), pageSize: core.serialization.property("page_size", core.serialization.number()), - toolsPage: core.serialization.property("tools_page", core.serialization.list(ReturnUserDefinedTool.optional())), + toolsPage: core.serialization.property("tools_page", core.serialization.list(ReturnUserDefinedTool.nullable())), totalPages: core.serialization.property("total_pages", core.serialization.number()), }); diff --git a/src/serialization/resources/empathicVoice/types/ReturnPrompt.ts b/src/serialization/resources/empathicVoice/types/ReturnPrompt.ts index 65757d38..0a1612c5 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnPrompt.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnPrompt.ts @@ -14,7 +14,10 @@ export const ReturnPrompt: core.serialization.ObjectSchema< name: core.serialization.string(), text: core.serialization.string(), version: core.serialization.number(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), versionType: core.serialization.property("version_type", core.serialization.string()), }); @@ -26,7 +29,7 @@ export declare namespace ReturnPrompt { name: string; text: string; version: 
number; - version_description?: string | null; + version_description?: (string | null | undefined) | null; version_type: string; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnTimeoutSpec.ts b/src/serialization/resources/empathicVoice/types/ReturnTimeoutSpec.ts index 6222f0a2..7d806cfc 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnTimeoutSpec.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnTimeoutSpec.ts @@ -8,13 +8,13 @@ export const ReturnTimeoutSpec: core.serialization.ObjectSchema< serializers.empathicVoice.ReturnTimeoutSpec.Raw, Hume.empathicVoice.ReturnTimeoutSpec > = core.serialization.object({ - durationSecs: core.serialization.property("duration_secs", core.serialization.number().optional()), + durationSecs: core.serialization.property("duration_secs", core.serialization.number().optionalNullable()), enabled: core.serialization.boolean(), }); export declare namespace ReturnTimeoutSpec { export interface Raw { - duration_secs?: number | null; + duration_secs?: (number | null | undefined) | null; enabled: boolean; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnUserDefinedTool.ts b/src/serialization/resources/empathicVoice/types/ReturnUserDefinedTool.ts index e6c09160..820a0115 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnUserDefinedTool.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnUserDefinedTool.ts @@ -11,30 +11,33 @@ export const ReturnUserDefinedTool: core.serialization.ObjectSchema< Hume.empathicVoice.ReturnUserDefinedTool > = core.serialization.object({ createdOn: core.serialization.property("created_on", core.serialization.number()), - description: core.serialization.string().optional(), - fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optional()), + description: core.serialization.string().optionalNullable(), + fallbackContent: core.serialization.property("fallback_content", 
core.serialization.string().optionalNullable()), id: core.serialization.string(), modifiedOn: core.serialization.property("modified_on", core.serialization.number()), name: core.serialization.string(), parameters: core.serialization.string(), toolType: core.serialization.property("tool_type", ReturnUserDefinedToolToolType), version: core.serialization.number(), - versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + versionDescription: core.serialization.property( + "version_description", + core.serialization.string().optionalNullable(), + ), versionType: core.serialization.property("version_type", ReturnUserDefinedToolVersionType), }); export declare namespace ReturnUserDefinedTool { export interface Raw { created_on: number; - description?: string | null; - fallback_content?: string | null; + description?: (string | null | undefined) | null; + fallback_content?: (string | null | undefined) | null; id: string; modified_on: number; name: string; parameters: string; tool_type: ReturnUserDefinedToolToolType.Raw; version: number; - version_description?: string | null; + version_description?: (string | null | undefined) | null; version_type: ReturnUserDefinedToolVersionType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/SessionSettings.ts b/src/serialization/resources/empathicVoice/types/SessionSettings.ts index a8e73295..86be1775 100644 --- a/src/serialization/resources/empathicVoice/types/SessionSettings.ts +++ b/src/serialization/resources/empathicVoice/types/SessionSettings.ts @@ -6,6 +6,7 @@ import type * as serializers from "../../../index.js"; import { AudioConfiguration } from "./AudioConfiguration.js"; import { BuiltinToolConfig } from "./BuiltinToolConfig.js"; import { Context } from "./Context.js"; +import { SessionSettingsType } from "./SessionSettingsType.js"; import { SessionSettingsVariablesValue } from "./SessionSettingsVariablesValue.js"; import { Tool } from 
"./Tool.js"; @@ -13,31 +14,37 @@ export const SessionSettings: core.serialization.ObjectSchema< serializers.empathicVoice.SessionSettings.Raw, Hume.empathicVoice.SessionSettings > = core.serialization.object({ - audio: AudioConfiguration.optional(), - builtinTools: core.serialization.property("builtin_tools", core.serialization.list(BuiltinToolConfig).optional()), - context: Context.optional(), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), - languageModelApiKey: core.serialization.property("language_model_api_key", core.serialization.string().optional()), - metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), - systemPrompt: core.serialization.property("system_prompt", core.serialization.string().optional()), - tools: core.serialization.list(Tool).optional(), - type: core.serialization.stringLiteral("session_settings"), + audio: AudioConfiguration.optionalNullable(), + builtinTools: core.serialization.property( + "builtin_tools", + core.serialization.list(BuiltinToolConfig).optionalNullable(), + ), + context: Context.optionalNullable(), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), + languageModelApiKey: core.serialization.property( + "language_model_api_key", + core.serialization.string().optionalNullable(), + ), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optionalNullable(), + systemPrompt: core.serialization.property("system_prompt", core.serialization.string().optionalNullable()), + tools: core.serialization.list(Tool).optionalNullable(), + type: SessionSettingsType, variables: core.serialization.record(core.serialization.string(), SessionSettingsVariablesValue).optional(), - voiceId: core.serialization.property("voice_id", core.serialization.string().optional()), + voiceId: core.serialization.property("voice_id", 
core.serialization.string().optionalNullable()), }); export declare namespace SessionSettings { export interface Raw { - audio?: AudioConfiguration.Raw | null; - builtin_tools?: BuiltinToolConfig.Raw[] | null; - context?: Context.Raw | null; - custom_session_id?: string | null; - language_model_api_key?: string | null; - metadata?: Record | null; - system_prompt?: string | null; - tools?: Tool.Raw[] | null; - type: "session_settings"; + audio?: (AudioConfiguration.Raw | null | undefined) | null; + builtin_tools?: (BuiltinToolConfig.Raw[] | null | undefined) | null; + context?: (Context.Raw | null | undefined) | null; + custom_session_id?: (string | null | undefined) | null; + language_model_api_key?: (string | null | undefined) | null; + metadata?: (Record | null | undefined) | null; + system_prompt?: (string | null | undefined) | null; + tools?: (Tool.Raw[] | null | undefined) | null; + type: SessionSettingsType.Raw; variables?: Record | null; - voice_id?: string | null; + voice_id?: (string | null | undefined) | null; } } diff --git a/src/serialization/resources/empathicVoice/types/SessionSettingsType.ts b/src/serialization/resources/empathicVoice/types/SessionSettingsType.ts new file mode 100644 index 00000000..ea4d2d79 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/SessionSettingsType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const SessionSettingsType: core.serialization.Schema< + serializers.empathicVoice.SessionSettingsType.Raw, + Hume.empathicVoice.SessionSettingsType +> = core.serialization.enum_(["session_settings"]); + +export declare namespace SessionSettingsType { + export type Raw = "session_settings"; +} diff --git a/src/serialization/resources/empathicVoice/types/SubscribeEvent.ts b/src/serialization/resources/empathicVoice/types/SubscribeEvent.ts index 1f1d0912..61f3838f 100644 --- a/src/serialization/resources/empathicVoice/types/SubscribeEvent.ts +++ b/src/serialization/resources/empathicVoice/types/SubscribeEvent.ts @@ -8,7 +8,6 @@ import { AssistantMessage } from "./AssistantMessage.js"; import { AssistantProsody } from "./AssistantProsody.js"; import { AudioOutput } from "./AudioOutput.js"; import { ChatMetadata } from "./ChatMetadata.js"; -import { SessionSettings } from "./SessionSettings.js"; import { ToolCallMessage } from "./ToolCallMessage.js"; import { ToolErrorMessage } from "./ToolErrorMessage.js"; import { ToolResponseMessage } from "./ToolResponseMessage.js"; @@ -31,7 +30,6 @@ export const SubscribeEvent: core.serialization.Schema< ToolCallMessage, ToolResponseMessage, ToolErrorMessage, - SessionSettings, ]); export declare namespace SubscribeEvent { @@ -46,6 +44,5 @@ export declare namespace SubscribeEvent { | UserMessage.Raw | ToolCallMessage.Raw | ToolResponseMessage.Raw - | ToolErrorMessage.Raw - | SessionSettings.Raw; + | ToolErrorMessage.Raw; } diff --git a/src/serialization/resources/empathicVoice/types/Tool.ts b/src/serialization/resources/empathicVoice/types/Tool.ts index 32eeabc0..a1b2f0c1 100644 --- a/src/serialization/resources/empathicVoice/types/Tool.ts +++ b/src/serialization/resources/empathicVoice/types/Tool.ts @@ -7,8 +7,11 @@ import { ToolType } from "./ToolType.js"; 
export const Tool: core.serialization.ObjectSchema = core.serialization.object({ - description: core.serialization.string().optional(), - fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optional()), + description: core.serialization.string().optionalNullable(), + fallbackContent: core.serialization.property( + "fallback_content", + core.serialization.string().optionalNullable(), + ), name: core.serialization.string(), parameters: core.serialization.string(), type: ToolType, @@ -16,8 +19,8 @@ export const Tool: core.serialization.ObjectSchema = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), name: core.serialization.string(), parameters: core.serialization.string(), responseRequired: core.serialization.property("response_required", core.serialization.boolean()), toolCallId: core.serialization.property("tool_call_id", core.serialization.string()), toolType: core.serialization.property("tool_type", ToolType), - type: core.serialization.stringLiteral("tool_call").optional(), + type: ToolCallMessageType.optional(), }); export declare namespace ToolCallMessage { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; name: string; parameters: string; response_required: boolean; tool_call_id: string; tool_type: ToolType.Raw; - type?: "tool_call" | null; + type?: ToolCallMessageType.Raw | null; } } diff --git a/src/serialization/resources/empathicVoice/types/ToolCallMessageType.ts b/src/serialization/resources/empathicVoice/types/ToolCallMessageType.ts new file mode 100644 index 00000000..ea0a3275 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ToolCallMessageType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API 
Definition. + +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const ToolCallMessageType: core.serialization.Schema< + serializers.empathicVoice.ToolCallMessageType.Raw, + Hume.empathicVoice.ToolCallMessageType +> = core.serialization.enum_(["tool_call"]); + +export declare namespace ToolCallMessageType { + export type Raw = "tool_call"; +} diff --git a/src/serialization/resources/empathicVoice/types/ToolErrorMessage.ts b/src/serialization/resources/empathicVoice/types/ToolErrorMessage.ts index b95b6824..629b6dca 100644 --- a/src/serialization/resources/empathicVoice/types/ToolErrorMessage.ts +++ b/src/serialization/resources/empathicVoice/types/ToolErrorMessage.ts @@ -4,31 +4,32 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; import { ErrorLevel } from "./ErrorLevel.js"; +import { ToolErrorMessageType } from "./ToolErrorMessageType.js"; import { ToolType } from "./ToolType.js"; export const ToolErrorMessage: core.serialization.ObjectSchema< serializers.empathicVoice.ToolErrorMessage.Raw, Hume.empathicVoice.ToolErrorMessage > = core.serialization.object({ - code: core.serialization.string().optional(), - content: core.serialization.string().optional(), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + code: core.serialization.string().optionalNullable(), + content: core.serialization.string().optionalNullable(), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), error: core.serialization.string(), - level: ErrorLevel.optional(), + level: ErrorLevel.optionalNullable(), toolCallId: core.serialization.property("tool_call_id", core.serialization.string()), - toolType: core.serialization.property("tool_type", 
ToolType.optional()), - type: core.serialization.stringLiteral("tool_error"), + toolType: core.serialization.property("tool_type", ToolType.optionalNullable()), + type: ToolErrorMessageType, }); export declare namespace ToolErrorMessage { export interface Raw { - code?: string | null; - content?: string | null; - custom_session_id?: string | null; + code?: (string | null | undefined) | null; + content?: (string | null | undefined) | null; + custom_session_id?: (string | null | undefined) | null; error: string; - level?: ErrorLevel.Raw | null; + level?: (ErrorLevel.Raw | null | undefined) | null; tool_call_id: string; - tool_type?: ToolType.Raw | null; - type: "tool_error"; + tool_type?: (ToolType.Raw | null | undefined) | null; + type: ToolErrorMessageType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/ToolErrorMessageType.ts b/src/serialization/resources/empathicVoice/types/ToolErrorMessageType.ts new file mode 100644 index 00000000..7d1aaf7d --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ToolErrorMessageType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const ToolErrorMessageType: core.serialization.Schema< + serializers.empathicVoice.ToolErrorMessageType.Raw, + Hume.empathicVoice.ToolErrorMessageType +> = core.serialization.enum_(["tool_error"]); + +export declare namespace ToolErrorMessageType { + export type Raw = "tool_error"; +} diff --git a/src/serialization/resources/empathicVoice/types/ToolResponseMessage.ts b/src/serialization/resources/empathicVoice/types/ToolResponseMessage.ts index c518bfa2..32ae1da2 100644 --- a/src/serialization/resources/empathicVoice/types/ToolResponseMessage.ts +++ b/src/serialization/resources/empathicVoice/types/ToolResponseMessage.ts @@ -3,6 +3,7 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { ToolResponseMessageType } from "./ToolResponseMessageType.js"; import { ToolType } from "./ToolType.js"; export const ToolResponseMessage: core.serialization.ObjectSchema< @@ -10,20 +11,20 @@ export const ToolResponseMessage: core.serialization.ObjectSchema< Hume.empathicVoice.ToolResponseMessage > = core.serialization.object({ content: core.serialization.string(), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), toolCallId: core.serialization.property("tool_call_id", core.serialization.string()), - toolName: core.serialization.property("tool_name", core.serialization.string().optional()), - toolType: core.serialization.property("tool_type", ToolType.optional()), - type: core.serialization.stringLiteral("tool_response"), + toolName: core.serialization.property("tool_name", 
core.serialization.string().optionalNullable()), + toolType: core.serialization.property("tool_type", ToolType.optionalNullable()), + type: ToolResponseMessageType, }); export declare namespace ToolResponseMessage { export interface Raw { content: string; - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; tool_call_id: string; - tool_name?: string | null; - tool_type?: ToolType.Raw | null; - type: "tool_response"; + tool_name?: (string | null | undefined) | null; + tool_type?: (ToolType.Raw | null | undefined) | null; + type: ToolResponseMessageType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/ToolResponseMessageType.ts b/src/serialization/resources/empathicVoice/types/ToolResponseMessageType.ts new file mode 100644 index 00000000..b59ccd61 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ToolResponseMessageType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. + +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const ToolResponseMessageType: core.serialization.Schema< + serializers.empathicVoice.ToolResponseMessageType.Raw, + Hume.empathicVoice.ToolResponseMessageType +> = core.serialization.enum_(["tool_response"]); + +export declare namespace ToolResponseMessageType { + export type Raw = "tool_response"; +} diff --git a/src/serialization/resources/empathicVoice/types/UserInput.ts b/src/serialization/resources/empathicVoice/types/UserInput.ts index 188e83fe..53049190 100644 --- a/src/serialization/resources/empathicVoice/types/UserInput.ts +++ b/src/serialization/resources/empathicVoice/types/UserInput.ts @@ -3,20 +3,21 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { UserInputType } from 
"./UserInputType.js"; export const UserInput: core.serialization.ObjectSchema< serializers.empathicVoice.UserInput.Raw, Hume.empathicVoice.UserInput > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), text: core.serialization.string(), - type: core.serialization.stringLiteral("user_input"), + type: UserInputType, }); export declare namespace UserInput { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; text: string; - type: "user_input"; + type: UserInputType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/UserInputType.ts b/src/serialization/resources/empathicVoice/types/UserInputType.ts new file mode 100644 index 00000000..bbf65415 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/UserInputType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const UserInputType: core.serialization.Schema< + serializers.empathicVoice.UserInputType.Raw, + Hume.empathicVoice.UserInputType +> = core.serialization.enum_(["user_input"]); + +export declare namespace UserInputType { + export type Raw = "user_input"; +} diff --git a/src/serialization/resources/empathicVoice/types/UserInterruption.ts b/src/serialization/resources/empathicVoice/types/UserInterruption.ts index ecce608e..98a73e72 100644 --- a/src/serialization/resources/empathicVoice/types/UserInterruption.ts +++ b/src/serialization/resources/empathicVoice/types/UserInterruption.ts @@ -3,20 +3,21 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { UserInterruptionType } from "./UserInterruptionType.js"; export const UserInterruption: core.serialization.ObjectSchema< serializers.empathicVoice.UserInterruption.Raw, Hume.empathicVoice.UserInterruption > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), time: core.serialization.number(), - type: core.serialization.stringLiteral("user_interruption"), + type: UserInterruptionType, }); export declare namespace UserInterruption { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; time: number; - type: "user_interruption"; + type: UserInterruptionType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/UserInterruptionType.ts b/src/serialization/resources/empathicVoice/types/UserInterruptionType.ts new file mode 100644 index 00000000..29e5467c 
--- /dev/null +++ b/src/serialization/resources/empathicVoice/types/UserInterruptionType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. + +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const UserInterruptionType: core.serialization.Schema< + serializers.empathicVoice.UserInterruptionType.Raw, + Hume.empathicVoice.UserInterruptionType +> = core.serialization.enum_(["user_interruption"]); + +export declare namespace UserInterruptionType { + export type Raw = "user_interruption"; +} diff --git a/src/serialization/resources/empathicVoice/types/UserMessage.ts b/src/serialization/resources/empathicVoice/types/UserMessage.ts index 83656ead..05b64e9f 100644 --- a/src/serialization/resources/empathicVoice/types/UserMessage.ts +++ b/src/serialization/resources/empathicVoice/types/UserMessage.ts @@ -6,30 +6,31 @@ import type * as serializers from "../../../index.js"; import { ChatMessage } from "./ChatMessage.js"; import { Inference } from "./Inference.js"; import { MillisecondInterval } from "./MillisecondInterval.js"; +import { UserMessageType } from "./UserMessageType.js"; export const UserMessage: core.serialization.ObjectSchema< serializers.empathicVoice.UserMessage.Raw, Hume.empathicVoice.UserMessage > = core.serialization.object({ - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), fromText: core.serialization.property("from_text", core.serialization.boolean()), interim: core.serialization.boolean(), - language: core.serialization.string().optional(), + language: core.serialization.string().optionalNullable(), message: ChatMessage, models: Inference, time: MillisecondInterval, - type: core.serialization.stringLiteral("user_message"), + 
type: UserMessageType, }); export declare namespace UserMessage { export interface Raw { - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; from_text: boolean; interim: boolean; - language?: string | null; + language?: (string | null | undefined) | null; message: ChatMessage.Raw; models: Inference.Raw; time: MillisecondInterval.Raw; - type: "user_message"; + type: UserMessageType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/UserMessageType.ts b/src/serialization/resources/empathicVoice/types/UserMessageType.ts new file mode 100644 index 00000000..981cca84 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/UserMessageType.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. + +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const UserMessageType: core.serialization.Schema< + serializers.empathicVoice.UserMessageType.Raw, + Hume.empathicVoice.UserMessageType +> = core.serialization.enum_(["user_message"]); + +export declare namespace UserMessageType { + export type Raw = "user_message"; +} diff --git a/src/serialization/resources/empathicVoice/types/WebSocketError.ts b/src/serialization/resources/empathicVoice/types/WebSocketError.ts index f7b6075a..757a42d9 100644 --- a/src/serialization/resources/empathicVoice/types/WebSocketError.ts +++ b/src/serialization/resources/empathicVoice/types/WebSocketError.ts @@ -3,26 +3,27 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; +import { ErrorType } from "./ErrorType.js"; export const WebSocketError: core.serialization.ObjectSchema< serializers.empathicVoice.WebSocketError.Raw, Hume.empathicVoice.WebSocketError > = core.serialization.object({ code: 
core.serialization.string(), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optionalNullable()), message: core.serialization.string(), - requestId: core.serialization.property("request_id", core.serialization.string().optional()), + requestId: core.serialization.property("request_id", core.serialization.string().optionalNullable()), slug: core.serialization.string(), - type: core.serialization.stringLiteral("error"), + type: ErrorType, }); export declare namespace WebSocketError { export interface Raw { code: string; - custom_session_id?: string | null; + custom_session_id?: (string | null | undefined) | null; message: string; - request_id?: string | null; + request_id?: (string | null | undefined) | null; slug: string; - type: "error"; + type: ErrorType.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/WebhookEventBase.ts b/src/serialization/resources/empathicVoice/types/WebhookBaseEvent.ts similarity index 73% rename from src/serialization/resources/empathicVoice/types/WebhookEventBase.ts rename to src/serialization/resources/empathicVoice/types/WebhookBaseEvent.ts index 62e045ce..6d9bf5a4 100644 --- a/src/serialization/resources/empathicVoice/types/WebhookEventBase.ts +++ b/src/serialization/resources/empathicVoice/types/WebhookBaseEvent.ts @@ -4,16 +4,16 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; -export const WebhookEventBase: core.serialization.ObjectSchema< - serializers.empathicVoice.WebhookEventBase.Raw, - Hume.empathicVoice.WebhookEventBase +export const WebhookBaseEvent: core.serialization.ObjectSchema< + serializers.empathicVoice.WebhookBaseEvent.Raw, + Hume.empathicVoice.WebhookBaseEvent > = core.serialization.object({ chatGroupId: 
core.serialization.property("chat_group_id", core.serialization.string()), chatId: core.serialization.property("chat_id", core.serialization.string()), - configId: core.serialization.property("config_id", core.serialization.string().optional()), + configId: core.serialization.property("config_id", core.serialization.string().nullable()), }); -export declare namespace WebhookEventBase { +export declare namespace WebhookBaseEvent { export interface Raw { chat_group_id: string; chat_id: string; diff --git a/src/serialization/resources/empathicVoice/types/WebhookEventChatEnded.ts b/src/serialization/resources/empathicVoice/types/WebhookEventChatEnded.ts index 89aab57a..5742a567 100644 --- a/src/serialization/resources/empathicVoice/types/WebhookEventChatEnded.ts +++ b/src/serialization/resources/empathicVoice/types/WebhookEventChatEnded.ts @@ -3,7 +3,8 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; -import { WebhookEventBase } from "./WebhookEventBase.js"; +import { WebhookBaseEvent } from "./WebhookBaseEvent.js"; +import { WebhookEventChatEndedEventName } from "./WebhookEventChatEndedEventName.js"; import { WebhookEventChatStatus } from "./WebhookEventChatStatus.js"; export const WebhookEventChatEnded: core.serialization.ObjectSchema< @@ -11,22 +12,22 @@ export const WebhookEventChatEnded: core.serialization.ObjectSchema< Hume.empathicVoice.WebhookEventChatEnded > = core.serialization .object({ - callerNumber: core.serialization.property("caller_number", core.serialization.string().optional()), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), + callerNumber: core.serialization.property("caller_number", core.serialization.string().nullable()), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().nullable()), durationSeconds: 
core.serialization.property("duration_seconds", core.serialization.number()), endReason: core.serialization.property("end_reason", WebhookEventChatStatus), endTime: core.serialization.property("end_time", core.serialization.number()), - eventName: core.serialization.property("event_name", core.serialization.stringLiteral("chat_ended").optional()), + eventName: core.serialization.property("event_name", WebhookEventChatEndedEventName.optional()), }) - .extend(WebhookEventBase); + .extend(WebhookBaseEvent); export declare namespace WebhookEventChatEnded { - export interface Raw extends WebhookEventBase.Raw { + export interface Raw extends WebhookBaseEvent.Raw { caller_number?: string | null; custom_session_id?: string | null; duration_seconds: number; end_reason: WebhookEventChatStatus.Raw; end_time: number; - event_name?: "chat_ended" | null; + event_name?: WebhookEventChatEndedEventName.Raw | null; } } diff --git a/src/serialization/resources/empathicVoice/types/WebhookEventChatEndedEventName.ts b/src/serialization/resources/empathicVoice/types/WebhookEventChatEndedEventName.ts new file mode 100644 index 00000000..88593f77 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/WebhookEventChatEndedEventName.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const WebhookEventChatEndedEventName: core.serialization.Schema< + serializers.empathicVoice.WebhookEventChatEndedEventName.Raw, + Hume.empathicVoice.WebhookEventChatEndedEventName +> = core.serialization.enum_(["chat_ended"]); + +export declare namespace WebhookEventChatEndedEventName { + export type Raw = "chat_ended"; +} diff --git a/src/serialization/resources/empathicVoice/types/WebhookEventChatStarted.ts b/src/serialization/resources/empathicVoice/types/WebhookEventChatStarted.ts index 792f31de..812d6b2b 100644 --- a/src/serialization/resources/empathicVoice/types/WebhookEventChatStarted.ts +++ b/src/serialization/resources/empathicVoice/types/WebhookEventChatStarted.ts @@ -3,7 +3,8 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; -import { WebhookEventBase } from "./WebhookEventBase.js"; +import { WebhookBaseEvent } from "./WebhookBaseEvent.js"; +import { WebhookEventChatStartedEventName } from "./WebhookEventChatStartedEventName.js"; import { WebhookEventChatStartType } from "./WebhookEventChatStartType.js"; export const WebhookEventChatStarted: core.serialization.ObjectSchema< @@ -11,23 +12,20 @@ export const WebhookEventChatStarted: core.serialization.ObjectSchema< Hume.empathicVoice.WebhookEventChatStarted > = core.serialization .object({ - callerNumber: core.serialization.property("caller_number", core.serialization.string().optional()), + callerNumber: core.serialization.property("caller_number", core.serialization.string().nullable()), chatStartType: core.serialization.property("chat_start_type", WebhookEventChatStartType), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), - eventName: 
core.serialization.property( - "event_name", - core.serialization.stringLiteral("chat_started").optional(), - ), + customSessionId: core.serialization.property("custom_session_id", core.serialization.string().nullable()), + eventName: core.serialization.property("event_name", WebhookEventChatStartedEventName.optional()), startTime: core.serialization.property("start_time", core.serialization.number()), }) - .extend(WebhookEventBase); + .extend(WebhookBaseEvent); export declare namespace WebhookEventChatStarted { - export interface Raw extends WebhookEventBase.Raw { + export interface Raw extends WebhookBaseEvent.Raw { caller_number?: string | null; chat_start_type: WebhookEventChatStartType.Raw; custom_session_id?: string | null; - event_name?: "chat_started" | null; + event_name?: WebhookEventChatStartedEventName.Raw | null; start_time: number; } } diff --git a/src/serialization/resources/empathicVoice/types/WebhookEventChatStartedEventName.ts b/src/serialization/resources/empathicVoice/types/WebhookEventChatStartedEventName.ts new file mode 100644 index 00000000..5ed676fa --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/WebhookEventChatStartedEventName.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const WebhookEventChatStartedEventName: core.serialization.Schema< + serializers.empathicVoice.WebhookEventChatStartedEventName.Raw, + Hume.empathicVoice.WebhookEventChatStartedEventName +> = core.serialization.enum_(["chat_started"]); + +export declare namespace WebhookEventChatStartedEventName { + export type Raw = "chat_started"; +} diff --git a/src/serialization/resources/empathicVoice/types/WebhookEventToolCall.ts b/src/serialization/resources/empathicVoice/types/WebhookEventToolCall.ts index 37a63776..3f95d301 100644 --- a/src/serialization/resources/empathicVoice/types/WebhookEventToolCall.ts +++ b/src/serialization/resources/empathicVoice/types/WebhookEventToolCall.ts @@ -4,26 +4,27 @@ import type * as Hume from "../../../../api/index.js"; import * as core from "../../../../core/index.js"; import type * as serializers from "../../../index.js"; import { ToolCallMessage } from "./ToolCallMessage.js"; -import { WebhookEventBase } from "./WebhookEventBase.js"; +import { WebhookBaseEvent } from "./WebhookBaseEvent.js"; +import { WebhookEventToolCallEventName } from "./WebhookEventToolCallEventName.js"; export const WebhookEventToolCall: core.serialization.ObjectSchema< serializers.empathicVoice.WebhookEventToolCall.Raw, Hume.empathicVoice.WebhookEventToolCall > = core.serialization .object({ - callerNumber: core.serialization.property("caller_number", core.serialization.string().optional()), - customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()), - eventName: core.serialization.property("event_name", core.serialization.stringLiteral("tool_call").optional()), + callerNumber: core.serialization.property("caller_number", core.serialization.string().nullable()), + customSessionId: core.serialization.property("custom_session_id", 
core.serialization.string().nullable()), + eventName: core.serialization.property("event_name", WebhookEventToolCallEventName.optional()), timestamp: core.serialization.number(), toolCallMessage: core.serialization.property("tool_call_message", ToolCallMessage), }) - .extend(WebhookEventBase); + .extend(WebhookBaseEvent); export declare namespace WebhookEventToolCall { - export interface Raw extends WebhookEventBase.Raw { + export interface Raw extends WebhookBaseEvent.Raw { caller_number?: string | null; custom_session_id?: string | null; - event_name?: "tool_call" | null; + event_name?: WebhookEventToolCallEventName.Raw | null; timestamp: number; tool_call_message: ToolCallMessage.Raw; } diff --git a/src/serialization/resources/empathicVoice/types/WebhookEventToolCallEventName.ts b/src/serialization/resources/empathicVoice/types/WebhookEventToolCallEventName.ts new file mode 100644 index 00000000..5124a834 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/WebhookEventToolCallEventName.ts @@ -0,0 +1,14 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +import type * as Hume from "../../../../api/index.js"; +import * as core from "../../../../core/index.js"; +import type * as serializers from "../../../index.js"; + +export const WebhookEventToolCallEventName: core.serialization.Schema< + serializers.empathicVoice.WebhookEventToolCallEventName.Raw, + Hume.empathicVoice.WebhookEventToolCallEventName +> = core.serialization.enum_(["tool_call"]); + +export declare namespace WebhookEventToolCallEventName { + export type Raw = "tool_call"; +} diff --git a/src/serialization/resources/empathicVoice/types/index.ts b/src/serialization/resources/empathicVoice/types/index.ts index 378af84a..28c305ac 100644 --- a/src/serialization/resources/empathicVoice/types/index.ts +++ b/src/serialization/resources/empathicVoice/types/index.ts @@ -1,7 +1,9 @@ export * from "./AssistantEnd.js"; export * from "./AssistantInput.js"; +export * from "./AssistantInputType.js"; export * from "./AssistantMessage.js"; export * from "./AssistantProsody.js"; +export * from "./AssistantProsodyType.js"; export * from "./AudioConfiguration.js"; export * from "./AudioInput.js"; export * from "./AudioOutput.js"; @@ -10,6 +12,7 @@ export * from "./BuiltinToolConfig.js"; export * from "./ChatMessage.js"; export * from "./ChatMessageToolResult.js"; export * from "./ChatMetadata.js"; +export * from "./ChatMetadataType.js"; export * from "./ConnectSessionSettings.js"; export * from "./ConnectSessionSettingsAudio.js"; export * from "./ConnectSessionSettingsContext.js"; @@ -21,6 +24,7 @@ export * from "./EmotionScores.js"; export * from "./Encoding.js"; export * from "./ErrorLevel.js"; export * from "./ErrorResponse.js"; +export * from "./ErrorType.js"; export * from "./HttpValidationError.js"; export * from "./Inference.js"; export * from "./JsonMessage.js"; @@ -28,6 +32,7 @@ export * from "./LanguageModelType.js"; export * from "./MillisecondInterval.js"; export * from "./ModelProviderEnum.js"; export * from "./PauseAssistantMessage.js"; +export * from 
"./PauseAssistantMessageType.js"; export * from "./PostedBuiltinTool.js"; export * from "./PostedBuiltinToolName.js"; export * from "./PostedConfigPromptSpec.js"; @@ -45,6 +50,7 @@ export * from "./PostedWebhookEventType.js"; export * from "./PostedWebhookSpec.js"; export * from "./ProsodyInference.js"; export * from "./ResumeAssistantMessage.js"; +export * from "./ResumeAssistantMessageType.js"; export * from "./ReturnBuiltinTool.js"; export * from "./ReturnBuiltinToolToolType.js"; export * from "./ReturnChat.js"; @@ -90,27 +96,37 @@ export * from "./ReturnWebhookEventType.js"; export * from "./ReturnWebhookSpec.js"; export * from "./Role.js"; export * from "./SessionSettings.js"; +export * from "./SessionSettingsType.js"; export * from "./SessionSettingsVariablesValue.js"; export * from "./SubscribeEvent.js"; export * from "./Tool.js"; export * from "./ToolCallMessage.js"; +export * from "./ToolCallMessageType.js"; export * from "./ToolErrorMessage.js"; +export * from "./ToolErrorMessageType.js"; export * from "./ToolResponseMessage.js"; +export * from "./ToolResponseMessageType.js"; export * from "./ToolType.js"; export * from "./UserInput.js"; +export * from "./UserInputType.js"; export * from "./UserInterruption.js"; +export * from "./UserInterruptionType.js"; export * from "./UserMessage.js"; +export * from "./UserMessageType.js"; export * from "./ValidationError.js"; export * from "./ValidationErrorLocItem.js"; export * from "./VoiceId.js"; export * from "./VoiceName.js"; export * from "./VoiceProvider.js"; export * from "./VoiceRef.js"; +export * from "./WebhookBaseEvent.js"; export * from "./WebhookEvent.js"; -export * from "./WebhookEventBase.js"; export * from "./WebhookEventChatEnded.js"; +export * from "./WebhookEventChatEndedEventName.js"; export * from "./WebhookEventChatStarted.js"; +export * from "./WebhookEventChatStartedEventName.js"; export * from "./WebhookEventChatStartType.js"; export * from "./WebhookEventChatStatus.js"; export * from 
"./WebhookEventToolCall.js"; +export * from "./WebhookEventToolCallEventName.js"; export * from "./WebSocketError.js"; diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts index 6de892c1..b134cb8e 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts @@ -7,7 +7,7 @@ import type * as serializers from "../../../../../index.js"; export const Alternative: core.serialization.Schema< serializers.expressionMeasurement.batch.Alternative.Raw, Hume.expressionMeasurement.batch.Alternative -> = core.serialization.stringLiteral("language_only"); +> = core.serialization.enum_(["language_only"]); export declare namespace Alternative { export type Raw = "language_only"; diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts index 8a401d82..95853209 100644 --- a/src/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts +++ b/src/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts @@ -11,7 +11,7 @@ export const StreamModelPredictionsFacePredictionsItem: core.serialization.Objec Hume.expressionMeasurement.stream.StreamModelPredictionsFacePredictionsItem > = core.serialization.object({ frame: core.serialization.number().optional(), - time: core.serialization.number().optional(), + time: core.serialization.number().optionalNullable(), bbox: StreamBoundingBox.optional(), prob: core.serialization.number().optional(), faceId: 
core.serialization.property("face_id", core.serialization.string().optional()), @@ -23,7 +23,7 @@ export const StreamModelPredictionsFacePredictionsItem: core.serialization.Objec export declare namespace StreamModelPredictionsFacePredictionsItem { export interface Raw { frame?: number | null; - time?: number | null; + time?: (number | null | undefined) | null; bbox?: StreamBoundingBox.Raw | null; prob?: number | null; face_id?: string | null; diff --git a/src/serialization/resources/tts/types/PostedTts.ts b/src/serialization/resources/tts/types/PostedTts.ts index f408ebb4..0c9f62ae 100644 --- a/src/serialization/resources/tts/types/PostedTts.ts +++ b/src/serialization/resources/tts/types/PostedTts.ts @@ -11,7 +11,7 @@ import { TimestampType } from "./TimestampType.js"; export const PostedTts: core.serialization.ObjectSchema = core.serialization.object({ - context: PostedContext.optional(), + context: PostedContext.optionalNullable(), format: Format.optional(), includeTimestampTypes: core.serialization.property( "include_timestamp_types", @@ -27,7 +27,7 @@ export const PostedTts: core.serialization.ObjectSchema = core.serialization.object({ - description: core.serialization.string().optional(), + description: core.serialization.string().optionalNullable(), speed: core.serialization.number().optional(), text: core.serialization.string(), trailingSilence: core.serialization.property("trailing_silence", core.serialization.number().optional()), - voice: PostedUtteranceVoice.optional(), + voice: PostedUtteranceVoice.optionalNullable(), }); export declare namespace PostedUtterance { export interface Raw { - description?: string | null; + description?: (string | null | undefined) | null; speed?: number | null; text: string; trailing_silence?: number | null; - voice?: PostedUtteranceVoice.Raw | null; + voice?: (PostedUtteranceVoice.Raw | null | undefined) | null; } } diff --git a/src/serialization/resources/tts/types/PublishTts.ts 
b/src/serialization/resources/tts/types/PublishTts.ts index 4bf99638..fa61eb65 100644 --- a/src/serialization/resources/tts/types/PublishTts.ts +++ b/src/serialization/resources/tts/types/PublishTts.ts @@ -8,22 +8,22 @@ import { PostedUtteranceVoice } from "./PostedUtteranceVoice.js"; export const PublishTts: core.serialization.ObjectSchema = core.serialization.object({ close: core.serialization.boolean().optional(), - description: core.serialization.string().optional(), + description: core.serialization.string().optionalNullable(), flush: core.serialization.boolean().optional(), speed: core.serialization.number().optional(), text: core.serialization.string().optional(), trailingSilence: core.serialization.property("trailing_silence", core.serialization.number().optional()), - voice: PostedUtteranceVoice.optional(), + voice: PostedUtteranceVoice.optionalNullable(), }); export declare namespace PublishTts { export interface Raw { close?: boolean | null; - description?: string | null; + description?: (string | null | undefined) | null; flush?: boolean | null; speed?: number | null; text?: string | null; trailing_silence?: number | null; - voice?: PostedUtteranceVoice.Raw | null; + voice?: (PostedUtteranceVoice.Raw | null | undefined) | null; } } diff --git a/src/serialization/resources/tts/types/ReturnTts.ts b/src/serialization/resources/tts/types/ReturnTts.ts index 02808891..04dccc0c 100644 --- a/src/serialization/resources/tts/types/ReturnTts.ts +++ b/src/serialization/resources/tts/types/ReturnTts.ts @@ -8,7 +8,7 @@ import { ReturnGeneration } from "./ReturnGeneration.js"; export const ReturnTts: core.serialization.ObjectSchema = core.serialization.object({ generations: core.serialization.list(ReturnGeneration), - requestId: core.serialization.property("request_id", core.serialization.string().optional()), + requestId: core.serialization.property("request_id", core.serialization.string().nullable()), }); export declare namespace ReturnTts { diff --git 
a/src/serialization/resources/tts/types/Snippet.ts b/src/serialization/resources/tts/types/Snippet.ts index 2a78206a..6302daa9 100644 --- a/src/serialization/resources/tts/types/Snippet.ts +++ b/src/serialization/resources/tts/types/Snippet.ts @@ -12,8 +12,8 @@ export const Snippet: core.serialization.ObjectSchema { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.chatGroups.getChatGroup("697056f0-6c7e-487d-9bd8-9c19df79f05f", { + const response = await client.empathicVoice.chatGroups.getChatGroup({ + id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", pageNumber: 0, pageSize: 1, ascendingOrder: true, @@ -167,7 +168,9 @@ describe("ChatGroups", () => { .build(); await expect(async () => { - return await client.empathicVoice.chatGroups.getChatGroup("id"); + return await client.empathicVoice.chatGroups.getChatGroup({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -207,7 +210,8 @@ describe("ChatGroups", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.chatGroups.getAudio("369846cf-6ad5-404d-905e-a8acb5cdfc78", { + const response = await client.empathicVoice.chatGroups.getAudio({ + id: "369846cf-6ad5-404d-905e-a8acb5cdfc78", pageNumber: 0, pageSize: 10, ascendingOrder: true, @@ -252,7 +256,9 @@ describe("ChatGroups", () => { .build(); await expect(async () => { - return await client.empathicVoice.chatGroups.getAudio("id"); + return await client.empathicVoice.chatGroups.getAudio({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -357,7 +363,8 @@ describe("ChatGroups", () => { }, ], }; - const page = await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", { + const page = await client.empathicVoice.chatGroups.listChatGroupEvents({ + id: "697056f0-6c7e-487d-9bd8-9c19df79f05f", pageNumber: 0, pageSize: 3, ascendingOrder: true, @@ -386,7 +393,9 @@ describe("ChatGroups", () => { .build(); await 
expect(async () => { - return await client.empathicVoice.chatGroups.listChatGroupEvents("id"); + return await client.empathicVoice.chatGroups.listChatGroupEvents({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); }); diff --git a/tests/wire/empathic-voice/chats.test.ts b/tests/wire/empathic-voice/chats.test.ts index 169cd49e..8f8af41b 100644 --- a/tests/wire/empathic-voice/chats.test.ts +++ b/tests/wire/empathic-voice/chats.test.ts @@ -196,7 +196,8 @@ describe("Chats", () => { version: 0, }, }; - const page = await client.empathicVoice.chats.listChatEvents("470a49f6-1dec-4afe-8b61-035d3b2d63b0", { + const page = await client.empathicVoice.chats.listChatEvents({ + id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0", pageNumber: 0, pageSize: 3, ascendingOrder: true, @@ -219,7 +220,9 @@ describe("Chats", () => { server.mockEndpoint().get("/v0/evi/chats/id").respondWith().statusCode(400).jsonBody(rawResponseBody).build(); await expect(async () => { - return await client.empathicVoice.chats.listChatEvents("id"); + return await client.empathicVoice.chats.listChatEvents({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -248,7 +251,9 @@ describe("Chats", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.chats.getAudio("470a49f6-1dec-4afe-8b61-035d3b2d63b0"); + const response = await client.empathicVoice.chats.getAudio({ + id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0", + }); expect(response).toEqual({ id: "470a49f6-1dec-4afe-8b61-035d3b2d63b0", userId: "e6235940-cfda-3988-9147-ff531627cf42", @@ -278,7 +283,9 @@ describe("Chats", () => { .build(); await expect(async () => { - return await client.empathicVoice.chats.getAudio("id"); + return await client.empathicVoice.chats.getAudio({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); }); diff --git a/tests/wire/empathic-voice/configs.test.ts b/tests/wire/empathic-voice/configs.test.ts index f45d8bb4..fb5e3d75 
100644 --- a/tests/wire/empathic-voice/configs.test.ts +++ b/tests/wire/empathic-voice/configs.test.ts @@ -449,7 +449,9 @@ describe("Configs", () => { }, ], }; - const page = await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3"); + const page = await client.empathicVoice.configs.listConfigVersions({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + }); expect(expected.configsPage).toEqual(page.data); expect(page.hasNextPage()).toBe(true); @@ -468,7 +470,9 @@ describe("Configs", () => { server.mockEndpoint().get("/v0/evi/configs/id").respondWith().statusCode(400).jsonBody(rawResponseBody).build(); await expect(async () => { - return await client.empathicVoice.configs.listConfigVersions("id"); + return await client.empathicVoice.configs.listConfigVersions({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -533,43 +537,41 @@ describe("Configs", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.configs.createConfigVersion( - "1b60e1a0-cc59-424a-8d2c-189d354db3f3", - { - versionDescription: "This is an updated version of the Weather Assistant Config.", - eviVersion: "3", - prompt: { - id: "af699d45-2985-42cc-91b9-af9e5da3bac5", - version: 0, - }, - voice: { - provider: "HUME_AI", - name: "Ava Song", - }, - languageModel: { - modelProvider: "ANTHROPIC", - modelResource: "claude-3-7-sonnet-latest", - temperature: 1, + const response = await client.empathicVoice.configs.createConfigVersion({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + versionDescription: "This is an updated version of the Weather Assistant Config.", + eviVersion: "3", + prompt: { + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + version: 0, + }, + voice: { + provider: "HUME_AI", + name: "Ava Song", + }, + languageModel: { + modelProvider: "ANTHROPIC", + modelResource: "claude-3-7-sonnet-latest", + temperature: 1, + }, + ellmModel: { + allowShortResponses: true, + }, + eventMessages: { + 
onNewChat: { + enabled: false, + text: "", }, - ellmModel: { - allowShortResponses: true, + onInactivityTimeout: { + enabled: false, + text: "", }, - eventMessages: { - onNewChat: { - enabled: false, - text: "", - }, - onInactivityTimeout: { - enabled: false, - text: "", - }, - onMaxDurationTimeout: { - enabled: false, - text: "", - }, + onMaxDurationTimeout: { + enabled: false, + text: "", }, }, - ); + }); expect(response).toEqual({ id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", version: 1, @@ -648,7 +650,8 @@ describe("Configs", () => { .build(); await expect(async () => { - return await client.empathicVoice.configs.createConfigVersion("id", { + return await client.empathicVoice.configs.createConfigVersion({ + id: "id", eviVersion: "evi_version", }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); @@ -668,7 +671,9 @@ describe("Configs", () => { .statusCode(200) .build(); - const response = await client.empathicVoice.configs.deleteConfig("1b60e1a0-cc59-424a-8d2c-189d354db3f3"); + const response = await client.empathicVoice.configs.deleteConfig({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + }); expect(response).toEqual(undefined); }); @@ -689,7 +694,9 @@ describe("Configs", () => { .build(); await expect(async () => { - return await client.empathicVoice.configs.deleteConfig("id"); + return await client.empathicVoice.configs.deleteConfig({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -741,7 +748,10 @@ describe("Configs", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.configs.getConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1); + const response = await client.empathicVoice.configs.getConfigVersion({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version: 1, + }); expect(response).toEqual({ id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", version: 1, @@ -819,7 +829,10 @@ describe("Configs", () => { .build(); await expect(async () => { - return await 
client.empathicVoice.configs.getConfigVersion("id", 1); + return await client.empathicVoice.configs.getConfigVersion({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -837,10 +850,10 @@ describe("Configs", () => { .statusCode(200) .build(); - const response = await client.empathicVoice.configs.deleteConfigVersion( - "1b60e1a0-cc59-424a-8d2c-189d354db3f3", - 1, - ); + const response = await client.empathicVoice.configs.deleteConfigVersion({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version: 1, + }); expect(response).toEqual(undefined); }); @@ -861,7 +874,10 @@ describe("Configs", () => { .build(); await expect(async () => { - return await client.empathicVoice.configs.deleteConfigVersion("id", 1); + return await client.empathicVoice.configs.deleteConfigVersion({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -914,13 +930,11 @@ describe("Configs", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.configs.updateConfigDescription( - "1b60e1a0-cc59-424a-8d2c-189d354db3f3", - 1, - { - versionDescription: "This is an updated version_description.", - }, - ); + const response = await client.empathicVoice.configs.updateConfigDescription({ + id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version: 1, + versionDescription: "This is an updated version_description.", + }); expect(response).toEqual({ id: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", version: 1, @@ -999,7 +1013,10 @@ describe("Configs", () => { .build(); await expect(async () => { - return await client.empathicVoice.configs.updateConfigDescription("id", 1); + return await client.empathicVoice.configs.updateConfigDescription({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); }); diff --git a/tests/wire/empathic-voice/controlPlane.test.ts b/tests/wire/empathic-voice/controlPlane.test.ts index 4ff6a562..1bc04e44 100644 --- 
a/tests/wire/empathic-voice/controlPlane.test.ts +++ b/tests/wire/empathic-voice/controlPlane.test.ts @@ -21,8 +21,11 @@ describe("ControlPlane", () => { .statusCode(200) .build(); - const response = await client.empathicVoice.controlPlane.send("chat_id", { - type: "session_settings", + const response = await client.empathicVoice.controlPlane.send({ + chatId: "chat_id", + body: { + type: "session_settings", + }, }); expect(response).toEqual(undefined); }); @@ -45,8 +48,11 @@ describe("ControlPlane", () => { .build(); await expect(async () => { - return await client.empathicVoice.controlPlane.send("chat_id", { - type: "session_settings", + return await client.empathicVoice.controlPlane.send({ + chatId: "chat_id", + body: { + type: "session_settings", + }, }); }).rejects.toThrow(Hume.empathicVoice.UnprocessableEntityError); }); diff --git a/tests/wire/empathic-voice/prompts.test.ts b/tests/wire/empathic-voice/prompts.test.ts index 635edbc0..4abee490 100644 --- a/tests/wire/empathic-voice/prompts.test.ts +++ b/tests/wire/empathic-voice/prompts.test.ts @@ -194,7 +194,9 @@ describe("Prompts", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.prompts.listPromptVersions("af699d45-2985-42cc-91b9-af9e5da3bac5"); + const response = await client.empathicVoice.prompts.listPromptVersions({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + }); expect(response).toEqual({ pageNumber: 0, pageSize: 10, @@ -225,7 +227,9 @@ describe("Prompts", () => { server.mockEndpoint().get("/v0/evi/prompts/id").respondWith().statusCode(400).jsonBody(rawResponseBody).build(); await expect(async () => { - return await client.empathicVoice.prompts.listPromptVersions("id"); + return await client.empathicVoice.prompts.listPromptVersions({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -258,13 +262,11 @@ describe("Prompts", () => { .jsonBody(rawResponseBody) .build(); - const response = await 
client.empathicVoice.prompts.createPromptVersion( - "af699d45-2985-42cc-91b9-af9e5da3bac5", - { - text: "You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", - versionDescription: "This is an updated version of the Weather Assistant Prompt.", - }, - ); + const response = await client.empathicVoice.prompts.createPromptVersion({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + text: "You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", + versionDescription: "This is an updated version of the Weather Assistant Prompt.", + }); expect(response).toEqual({ id: "af699d45-2985-42cc-91b9-af9e5da3bac5", version: 1, @@ -295,7 +297,8 @@ describe("Prompts", () => { .build(); await expect(async () => { - return await client.empathicVoice.prompts.createPromptVersion("id", { + return await client.empathicVoice.prompts.createPromptVersion({ + id: "id", text: "text", }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); @@ -315,7 +318,9 @@ describe("Prompts", () => { .statusCode(200) .build(); - const response = await client.empathicVoice.prompts.deletePrompt("af699d45-2985-42cc-91b9-af9e5da3bac5"); + const response = await client.empathicVoice.prompts.deletePrompt({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + }); expect(response).toEqual(undefined); }); @@ -336,7 +341,9 @@ describe("Prompts", () => { .build(); await expect(async () => { - return await client.empathicVoice.prompts.deletePrompt("id"); 
+ return await client.empathicVoice.prompts.deletePrompt({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -365,7 +372,10 @@ describe("Prompts", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.prompts.getPromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", 0); + const response = await client.empathicVoice.prompts.getPromptVersion({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + version: 0, + }); expect(response).toEqual({ id: "af699d45-2985-42cc-91b9-af9e5da3bac5", version: 0, @@ -395,7 +405,10 @@ describe("Prompts", () => { .build(); await expect(async () => { - return await client.empathicVoice.prompts.getPromptVersion("id", 1); + return await client.empathicVoice.prompts.getPromptVersion({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -413,10 +426,10 @@ describe("Prompts", () => { .statusCode(200) .build(); - const response = await client.empathicVoice.prompts.deletePromptVersion( - "af699d45-2985-42cc-91b9-af9e5da3bac5", - 1, - ); + const response = await client.empathicVoice.prompts.deletePromptVersion({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + version: 1, + }); expect(response).toEqual(undefined); }); @@ -437,7 +450,10 @@ describe("Prompts", () => { .build(); await expect(async () => { - return await client.empathicVoice.prompts.deletePromptVersion("id", 1); + return await client.empathicVoice.prompts.deletePromptVersion({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -467,13 +483,11 @@ describe("Prompts", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.prompts.updatePromptDescription( - "af699d45-2985-42cc-91b9-af9e5da3bac5", - 1, - { - versionDescription: "This is an updated version_description.", - }, - ); + const response = await client.empathicVoice.prompts.updatePromptDescription({ + id: "af699d45-2985-42cc-91b9-af9e5da3bac5", + 
version: 1, + versionDescription: "This is an updated version_description.", + }); expect(response).toEqual({ id: "af699d45-2985-42cc-91b9-af9e5da3bac5", version: 1, @@ -504,7 +518,10 @@ describe("Prompts", () => { .build(); await expect(async () => { - return await client.empathicVoice.prompts.updatePromptDescription("id", 1); + return await client.empathicVoice.prompts.updatePromptDescription({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); }); diff --git a/tests/wire/empathic-voice/tools.test.ts b/tests/wire/empathic-voice/tools.test.ts index 9ea82366..cd218655 100644 --- a/tests/wire/empathic-voice/tools.test.ts +++ b/tests/wire/empathic-voice/tools.test.ts @@ -257,7 +257,9 @@ describe("Tools", () => { }, ], }; - const page = await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea"); + const page = await client.empathicVoice.tools.listToolVersions({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + }); expect(expected.toolsPage).toEqual(page.data); expect(page.hasNextPage()).toBe(true); @@ -276,7 +278,9 @@ describe("Tools", () => { server.mockEndpoint().get("/v0/evi/tools/id").respondWith().statusCode(400).jsonBody(rawResponseBody).build(); await expect(async () => { - return await client.empathicVoice.tools.listToolVersions("id"); + return await client.empathicVoice.tools.listToolVersions({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -318,7 +322,8 @@ describe("Tools", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.tools.createToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", { + const response = await client.empathicVoice.tools.createToolVersion({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", parameters: '{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. 
San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', versionDescription: @@ -361,7 +366,8 @@ describe("Tools", () => { .build(); await expect(async () => { - return await client.empathicVoice.tools.createToolVersion("id", { + return await client.empathicVoice.tools.createToolVersion({ + id: "id", parameters: "parameters", }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); @@ -381,7 +387,9 @@ describe("Tools", () => { .statusCode(200) .build(); - const response = await client.empathicVoice.tools.deleteTool("00183a3f-79ba-413d-9f3b-609864268bea"); + const response = await client.empathicVoice.tools.deleteTool({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + }); expect(response).toEqual(undefined); }); @@ -402,7 +410,9 @@ describe("Tools", () => { .build(); await expect(async () => { - return await client.empathicVoice.tools.deleteTool("id"); + return await client.empathicVoice.tools.deleteTool({ + id: "id", + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -436,7 +446,10 @@ describe("Tools", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.tools.getToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1); + const response = await client.empathicVoice.tools.getToolVersion({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + version: 1, + }); expect(response).toEqual({ toolType: "FUNCTION", id: "00183a3f-79ba-413d-9f3b-609864268bea", @@ -471,7 +484,10 @@ describe("Tools", () => { .build(); await expect(async () => { - return await client.empathicVoice.tools.getToolVersion("id", 1); + return await client.empathicVoice.tools.getToolVersion({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -489,7 +505,10 @@ describe("Tools", () => { .statusCode(200) .build(); - const response = await 
client.empathicVoice.tools.deleteToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1); + const response = await client.empathicVoice.tools.deleteToolVersion({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + version: 1, + }); expect(response).toEqual(undefined); }); @@ -510,7 +529,10 @@ describe("Tools", () => { .build(); await expect(async () => { - return await client.empathicVoice.tools.deleteToolVersion("id", 1); + return await client.empathicVoice.tools.deleteToolVersion({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); @@ -548,14 +570,12 @@ describe("Tools", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.empathicVoice.tools.updateToolDescription( - "00183a3f-79ba-413d-9f3b-609864268bea", - 1, - { - versionDescription: - "Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", - }, - ); + const response = await client.empathicVoice.tools.updateToolDescription({ + id: "00183a3f-79ba-413d-9f3b-609864268bea", + version: 1, + versionDescription: + "Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. 
Uses Celsius, Fahrenheit, or kelvin depending on user's region.", + }); expect(response).toEqual({ toolType: "FUNCTION", id: "00183a3f-79ba-413d-9f3b-609864268bea", @@ -591,7 +611,10 @@ describe("Tools", () => { .build(); await expect(async () => { - return await client.empathicVoice.tools.updateToolDescription("id", 1); + return await client.empathicVoice.tools.updateToolDescription({ + id: "id", + version: 1, + }); }).rejects.toThrow(Hume.empathicVoice.BadRequestError); }); }); diff --git a/tests/wire/expression-measurement/batch/main.test.ts b/tests/wire/expression-measurement/batch/main.test.ts index b734b858..c0e38a30 100644 --- a/tests/wire/expression-measurement/batch/main.test.ts +++ b/tests/wire/expression-measurement/batch/main.test.ts @@ -171,7 +171,9 @@ describe("Batch", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.expressionMeasurement.batch.getJobDetails("job_id"); + const response = await client.expressionMeasurement.batch.getJobDetails({ + id: "job_id", + }); expect(response).toEqual({ type: "INFERENCE", jobId: "job_id", @@ -314,7 +316,9 @@ describe("Batch", () => { .jsonBody(rawResponseBody) .build(); - const response = await client.expressionMeasurement.batch.getJobPredictions("job_id"); + const response = await client.expressionMeasurement.batch.getJobPredictions({ + id: "job_id", + }); expect(response).toEqual([ { source: {