From 93b0ff3c37f9fa5eaf732a6ec0bbf2ffd13e947b Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 11 Sep 2025 19:24:36 +0000 Subject: [PATCH] SDK regeneration --- build.gradle | 4 +- reference.md | 113 ++++++------- .../java/com/cohere/api/AsyncRawCohere.java | 12 +- src/main/java/com/cohere/api/RawCohere.java | 12 +- .../com/cohere/api/core/ClientOptions.java | 4 +- .../com/cohere/api/requests/ChatRequest.java | 106 ++++++------ .../api/requests/ChatStreamRequest.java | 106 ++++++------ .../resources/v2/requests/V2ChatRequest.java | 114 +++++-------- .../v2/requests/V2ChatStreamRequest.java | 114 +++++-------- .../resources/v2/requests/V2EmbedRequest.java | 4 + .../AssistantMessageResponseContentItem.java | 14 +- .../types/AssistantMessageV2ContentItem.java | 58 ++++++- ...tContentDeltaEventDeltaMessageContent.java | 31 +++- ...tContentStartEventDeltaMessageContent.java | 29 +++- .../cohere/api/types/ChatThinkingContent.java | 102 +++++++++++ .../types/EmbedByTypeResponseEmbeddings.java | 37 +++- .../com/cohere/api/types/EmbeddingType.java | 4 +- .../java/com/cohere/api/types/Thinking.java | 160 ++++++++++++++++++ ...ReasoningEffort.java => ThinkingType.java} | 10 +- 19 files changed, 693 insertions(+), 341 deletions(-) create mode 100644 src/main/java/com/cohere/api/types/ChatThinkingContent.java create mode 100644 src/main/java/com/cohere/api/types/Thinking.java rename src/main/java/com/cohere/api/types/{ReasoningEffort.java => ThinkingType.java} (72%) diff --git a/build.gradle b/build.gradle index caef60e..102bc1e 100644 --- a/build.gradle +++ b/build.gradle @@ -46,7 +46,7 @@ java { group = 'com.cohere' -version = '1.8.1' +version = '1.8.0' jar { dependsOn(":generatePomFileForMavenPublication") @@ -77,7 +77,7 @@ publishing { maven(MavenPublication) { groupId = 'com.cohere' artifactId = 'cohere-java' - version = '1.8.1' + version = '1.8.0' from components.java pom { name = 'cohere' diff --git a/reference.md b/reference.md index fa45643..0a2e614 100644 --- a/reference.md +++ b/reference.md @@ -57,19 +57,6 @@ client.chatStream(
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - @JsonProperty("raw_prompting") - public OptionalCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -377,6 +367,16 @@ public OptionalCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ */ + @JsonProperty("raw_prompting") + public OptionalWhen tools is passed (without tool_results), the text field in the response will be "" and the tool_calls field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the tool_calls array will be empty.
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting); The name of a compatible Cohere model or the ID of a fine-tuned model.
*Compatible Deployments: Cohere Platform, Private Deployments
@@ -729,6 +720,15 @@ public interface _FinalStage { _FinalStage presencePenalty(Double presencePenalty); + /** + *When enabled, the user's prompt will be sent to the model without + * any pre-processing.
+ *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ */ + _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting); A list of available tools (functions) that the model may suggest invoking before producing a text response.
*When tools is passed (without tool_results), the text field in the response will be "" and the tool_calls field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the tool_calls array will be empty.
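To make the tools contract above concrete, here is a small, unverified sketch against the v1 chat surface of this SDK. The Tool builder stages and the response getters are assumptions based on the generated-code conventions, not confirmed by this diff:

    import com.cohere.api.Cohere;
    import com.cohere.api.requests.ChatRequest;
    import com.cohere.api.types.NonStreamedChatResponse;
    import com.cohere.api.types.Tool;
    import java.util.List;

    public class ToolCallsSketch {
        public static void main(String[] args) {
            Cohere cohere = Cohere.builder().token(System.getenv("CO_API_KEY")).build();
            NonStreamedChatResponse response = cohere.chat(ChatRequest.builder()
                    .message("How were sales yesterday?")
                    // tools without tool_results: the response text comes back as ""
                    // and tool_calls lists the invocations the model wants made.
                    .tools(List.of(Tool.builder()
                            .name("query_daily_sales_report")
                            .description("Retrieves the sales report for a given day.")
                            .build()))
                    .build());
            // An empty tool_calls list means the model needs no tool call.
            response.getToolCalls().ifPresent(calls -> calls.forEach(System.out::println));
        }
    }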
When enabled, the user's prompt will be sent to the model without + * any pre-processing.
+ *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ * @return Reference to {@code this} so that method calls can be chained together. + */ + @java.lang.Override + public _FinalStage rawPrompting(Boolean rawPrompting) { + this.rawPrompting = Optional.ofNullable(rawPrompting); + return this; + } + + /** + *When enabled, the user's prompt will be sent to the model without + * any pre-processing.
+ *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ */ + @java.lang.Override + @JsonSetter(value = "raw_prompting", nulls = Nulls.SKIP) + public _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting) Defaults to 0.0, min value of 0.0, max value of 1.0.
Used to reduce repetitiveness of generated tokens. Similar to frequency_penalty, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- * @return Reference to {@code this} so that method calls can be chained together. - */ - @java.lang.Override - public _FinalStage rawPrompting(Boolean rawPrompting) { - this.rawPrompting = Optional.ofNullable(rawPrompting); - return this; - } - - /** - *When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - @java.lang.Override - @JsonSetter(value = "raw_prompting", nulls = Nulls.SKIP) - public _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting) Pass text/event-stream to receive the streamed response as server-sent events. The default is \n delimited events.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
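The rawPrompting(Boolean) overload shown in this hunk makes the flag easy to set inline. A minimal sketch (the message text is a placeholder; the rest is the usual v1 builder flow):

    // Sends the prompt verbatim, skipping Cohere's prompt pre-processing.
    ChatRequest request = ChatRequest.builder()
            .message("<prompt formatted exactly as the model should see it>")
            .rawPrompting(true)
            .build();

The same edits repeat below for the streaming request class listed in the diffstat.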
- */ - @JsonProperty("raw_prompting") - public OptionalCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -377,6 +367,16 @@ public OptionalCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ */ + @JsonProperty("raw_prompting") + public OptionalWhen tools is passed (without tool_results), the text field in the response will be "" and the tool_calls field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the tool_calls array will be empty.
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting); The name of a compatible Cohere model or the ID of a fine-tuned model.
*Compatible Deployments: Cohere Platform, Private Deployments
@@ -729,6 +720,15 @@ public interface _FinalStage { _FinalStage presencePenalty(Double presencePenalty); + /** + *When enabled, the user's prompt will be sent to the model without + * any pre-processing.
+ *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ */ + _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting); A list of available tools (functions) that the model may suggest invoking before producing a text response.
*When tools is passed (without tool_results), the text field in the response will be "" and the tool_calls field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the tool_calls array will be empty.
When enabled, the user's prompt will be sent to the model without + * any pre-processing.
+ *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ * @return Reference to {@code this} so that method calls can be chained together. + */ + @java.lang.Override + public _FinalStage rawPrompting(Boolean rawPrompting) { + this.rawPrompting = Optional.ofNullable(rawPrompting); + return this; + } + + /** + *When enabled, the user's prompt will be sent to the model without + * any pre-processing.
+ *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ */ + @java.lang.Override + @JsonSetter(value = "raw_prompting", nulls = Nulls.SKIP) + public _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting) Defaults to 0.0, min value of 0.0, max value of 1.0.
Used to reduce repetitiveness of generated tokens. Similar to frequency_penalty, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- * @return Reference to {@code this} so that method calls can be chained together. - */ - @java.lang.Override - public _FinalStage rawPrompting(Boolean rawPrompting) { - this.rawPrompting = Optional.ofNullable(rawPrompting); - return this; - } - - /** - *When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - @java.lang.Override - @JsonSetter(value = "raw_prompting", nulls = Nulls.SKIP) - public _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting) Pass text/event-stream to receive the streamed response as server-sent events. The default is \n delimited events.
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - @JsonProperty("raw_prompting") - public OptionalNote: Setting a low value may result in incomplete generations.
+ * @return The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
+ * Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
* Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting); The maximum number of tokens the model will generate as part of the response.
- *Note: Setting a low value may result in incomplete generations.
+ *The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
* Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
Used to control whether or not the model will be forced to use a tool when answering. When REQUIRED is specified, the model will be forced to use at least one of the user-defined tools, and the tools parameter must be passed in the request.
* When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
* Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
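A schematic sketch of the tool_choice behavior documented above. The enum name ToolChoice is an assumption (the generated type may be named differently), and messages and tools are assumed to be prepared earlier with the SDK's v2 types:

    V2ChatRequest request = V2ChatRequest.builder()
            .model("command-r7b-12-2024")    // tool_choice needs Command-r7b or newer
            .messages(messages)              // v2 chat messages built earlier
            .tools(tools)                    // REQUIRED implies at least one tool is passed
            .toolChoice(ToolChoice.REQUIRED) // or ToolChoice.NONE to force a direct answer;
            .build();                        // omit toolChoice to let the model decide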
The maximum number of tokens the model will generate as part of the response.
- *Note: Setting a low value may result in incomplete generations.
+ *The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
The maximum number of tokens the model will generate as part of the response.
- *Note: Setting a low value may result in incomplete generations.
+ *The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
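A companion sketch of the clarified max_tokens semantics, assuming a client named cohere and the usual generated getters:

    V2ChatRequest request = V2ChatRequest.builder()
            .model("command-r-08-2024")
            .messages(messages) // built earlier
            .maxTokens(64)      // a low cap risks truncated generations
            .build();
    ChatResponse response = cohere.v2().chat(request);
    // If the cap cut the generation short, finish_reason is "MAX_TOKENS";
    // a cap above the model's own output limit is reduced to that limit.
    System.out.println(response.getFinishReason());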
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- * @return Reference to {@code this} so that method calls can be chained together. - */ - @java.lang.Override - public _FinalStage rawPrompting(Boolean rawPrompting) { - this.rawPrompting = Optional.ofNullable(rawPrompting); - return this; - } - - /** - *When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - @java.lang.Override - @JsonSetter(value = "raw_prompting", nulls = Nulls.SKIP) - public _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting) Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - @JsonProperty("raw_prompting") - public OptionalNote: Setting a low value may result in incomplete generations.
+ * @return The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
+ * Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
* Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting); The maximum number of tokens the model will generate as part of the response.
- *Note: Setting a low value may result in incomplete generations.
+ *The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
* Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
Used to control whether or not the model will be forced to use a tool when answering. When REQUIRED is specified, the model will be forced to use at least one of the user-defined tools, and the tools parameter must be passed in the request.
* When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
When NONE is specified, the model will be forced not to use one of the specified tools, and give a direct response.
* If tool_choice isn't specified, then the model is free to choose whether to use the specified tools or not.
* Note: This parameter is only compatible with models Command-r7b and newer.
- *Note: The same functionality can be achieved in /v1/chat using the force_single_step parameter. If force_single_step=true, this is equivalent to specifying REQUIRED. While if force_single_step=true and tool_results are passed, this is equivalent to specifying NONE.
The maximum number of tokens the model will generate as part of the response.
- *Note: Setting a low value may result in incomplete generations.
+ *The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
The maximum number of tokens the model will generate as part of the response.
- *Note: Setting a low value may result in incomplete generations.
+ *The maximum number of output tokens the model will generate in the response. If not set, max_tokens defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the model documentation.
Note: Setting a low value may result in incomplete generations. In such cases, the finish_reason field in the response will be set to "MAX_TOKENS".
Note: If max_tokens is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- * @return Reference to {@code this} so that method calls can be chained together. - */ - @java.lang.Override - public _FinalStage rawPrompting(Boolean rawPrompting) { - this.rawPrompting = Optional.ofNullable(rawPrompting); - return this; - } - - /** - *When enabled, the user's prompt will be sent to the model without - * any pre-processing.
- *Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
- */ - @java.lang.Override - @JsonSetter(value = "raw_prompting", nulls = Nulls.SKIP) - public _FinalStage rawPrompting(Optional&lt;Boolean&gt; rawPrompting)
"uint8": Use this when you want to get back unsigned int8 embeddings. Supported with Embed v3.0 and newer Embed models.
"binary": Use this when you want to get back signed binary embeddings. Supported with Embed v3.0 and newer Embed models.
"ubinary": Use this when you want to get back unsigned binary embeddings. Supported with Embed v3.0 and newer Embed models.
"base64": Use this when you want to get back base64 embeddings. Supported with Embed v3.0 and newer Embed models.