🌿 Fern Regeneration -- August 8, 2025 #253

Open · wants to merge 1 commit into base: main
7 changes: 7 additions & 0 deletions src/api/client/requests/ChatRequest.ts
@@ -240,6 +240,13 @@ export interface ChatRequest {
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
*/
presencePenalty?: number;
/**
* When enabled, the user's prompt will be sent to the model without
* any pre-processing.
*
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
*/
rawPrompting?: boolean;
/**
* A list of available tools (functions) that the model may suggest invoking before producing a text response.
*
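
As a usage sketch of the new field (not part of the diff): with the regenerated SDK, `rawPrompting` can be passed on a v1 chat call. The client setup and model name below are illustrative assumptions, not prescribed by this PR.

import { CohereClient } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.COHERE_API_KEY! });

async function main() {
    // With rawPrompting enabled, the message is sent to the model verbatim,
    // skipping Cohere's prompt pre-processing.
    const response = await cohere.chat({
        model: "command-r", // placeholder model name
        message: "A fully hand-constructed prompt, special tokens and all.",
        rawPrompting: true,
    });
    console.log(response.text);
}

main();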
7 changes: 7 additions & 0 deletions src/api/client/requests/ChatStreamRequest.ts
@@ -186,6 +186,13 @@ export interface ChatStreamRequest {
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
*/
presencePenalty?: number;
/**
* When enabled, the user's prompt will be sent to the model without
* any pre-processing.
*
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
*/
rawPrompting?: boolean;
/**
* A list of available tools (functions) that the model may suggest invoking before producing a text response.
*
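
The streaming variant takes the same flag. A sketch assuming the v1 `chatStream` async iterator and its `text-generation` events:

import { CohereClient } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.COHERE_API_KEY! });

async function streamRaw() {
    const stream = await cohere.chatStream({
        model: "command-r", // placeholder model name
        message: "A hand-constructed prompt.",
        rawPrompting: true,
    });
    // Print text deltas as they arrive.
    for await (const event of stream) {
        if (event.eventType === "text-generation") {
            process.stdout.write(event.text);
        }
    }
}

streamRaw();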
13 changes: 11 additions & 2 deletions src/api/resources/v2/client/requests/V2ChatRequest.ts
@@ -127,9 +127,11 @@ export interface V2ChatRequest {
*/
safetyMode?: Cohere.V2ChatRequestSafetyMode;
/**
* The maximum number of tokens the model will generate as part of the response.
* The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).
*
* **Note**: Setting a low value may result in incomplete generations.
* **Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.
*
* **Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
*/
maxTokens?: number;
/** A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point, not including the stop sequence. */
@@ -182,4 +184,11 @@ export interface V2ChatRequest {
*/
toolChoice?: Cohere.V2ChatRequestToolChoice;
thinking?: Cohere.Thinking;
/**
* When enabled, the user's prompt will be sent to the model without
* any pre-processing.
*
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
*/
rawPrompting?: boolean;
}
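
To illustrate the documented capping behavior, a hedged sketch against the v2 surface (the model name and client setup are placeholders): a deliberately low `maxTokens` should truncate the generation and surface as a `MAX_TOKENS` finish reason.

import { CohereClient } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.COHERE_API_KEY! });

async function main() {
    const response = await cohere.v2.chat({
        model: "command-r-plus", // placeholder model name
        messages: [{ role: "user", content: "Write a long essay about ferns." }],
        maxTokens: 25, // deliberately low to force truncation
    });
    // Per the docstring above, truncated generations report MAX_TOKENS.
    console.log(response.finishReason); // expected: "MAX_TOKENS"
}

main();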
13 changes: 11 additions & 2 deletions src/api/resources/v2/client/requests/V2ChatStreamRequest.ts
@@ -114,9 +114,11 @@ export interface V2ChatStreamRequest {
*/
safetyMode?: Cohere.V2ChatStreamRequestSafetyMode;
/**
* The maximum number of tokens the model will generate as part of the response.
* The maximum number of output tokens the model will generate in the response. If not set, `max_tokens` defaults to the model's maximum output token limit. You can find the maximum output token limits for each model in the [model documentation](https://docs.cohere.com/docs/models).
*
* **Note**: Setting a low value may result in incomplete generations.
* **Note**: Setting a low value may result in incomplete generations. In such cases, the `finish_reason` field in the response will be set to `"MAX_TOKENS"`.
*
* **Note**: If `max_tokens` is set higher than the model's maximum output token limit, the generation will be capped at that model-specific maximum limit.
*/
maxTokens?: number;
/** A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point, not including the stop sequence. */
@@ -169,4 +171,11 @@ export interface V2ChatStreamRequest {
*/
toolChoice?: Cohere.V2ChatStreamRequestToolChoice;
thinking?: Cohere.Thinking;
/**
* When enabled, the user's prompt will be sent to the model without
* any pre-processing.
*
* Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
*/
rawPrompting?: boolean;
}
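
The same options apply to the streaming request. A sketch that prints v2 `content-delta` text as it arrives (event shapes assumed from the generated v2 stream types):

import { CohereClient } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.COHERE_API_KEY! });

async function streamV2() {
    const stream = await cohere.v2.chatStream({
        model: "command-r-plus", // placeholder model name
        messages: [{ role: "user", content: "Summarize the fern life cycle." }],
        maxTokens: 100,
    });
    for await (const event of stream) {
        if (event.type === "content-delta") {
            process.stdout.write(event.delta?.message?.content?.text ?? "");
        }
    }
}

streamV2();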
4 changes: 4 additions & 0 deletions src/api/types/ChatMessageEndEventDelta.ts
@@ -2,7 +2,11 @@
* This file was auto-generated by Fern from our API Definition.
*/

import * as Cohere from "../index";

export interface ChatMessageEndEventDelta {
/** An error message if an error occurred during the generation. */
error?: string;
finishReason?: Cohere.ChatFinishReason;
usage?: Cohere.Usage;
}
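
The new optional `error` field gives stream consumers a place to detect an abnormal end of generation. A sketch of handling the `message-end` delta, again assuming the v2 stream event shapes:

import { CohereClient } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.COHERE_API_KEY! });

async function watchForErrors() {
    const stream = await cohere.v2.chatStream({
        model: "command-r-plus", // placeholder model name
        messages: [{ role: "user", content: "Hello!" }],
    });
    for await (const event of stream) {
        if (event.type === "message-end") {
            if (event.delta?.error) {
                // New in this regeneration: an error message on the end-event delta.
                console.error(`Generation failed: ${event.delta.error}`);
            } else {
                console.log("Finished:", event.delta?.finishReason, event.delta?.usage);
            }
        }
    }
}

watchForErrors();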
2 changes: 2 additions & 0 deletions src/serialization/client/requests/ChatRequest.ts
@@ -41,6 +41,7 @@ export const ChatRequest: core.serialization.Schema<
),
frequencyPenalty: core.serialization.property("frequency_penalty", core.serialization.number().optional()),
presencePenalty: core.serialization.property("presence_penalty", core.serialization.number().optional()),
rawPrompting: core.serialization.property("raw_prompting", core.serialization.boolean().optional()),
tools: core.serialization.list(Tool).optional(),
toolResults: core.serialization.property("tool_results", core.serialization.list(ToolResult).optional()),
forceSingleStep: core.serialization.property("force_single_step", core.serialization.boolean().optional()),
@@ -69,6 +70,7 @@ export declare namespace ChatRequest {
stop_sequences?: string[] | null;
frequency_penalty?: number | null;
presence_penalty?: number | null;
raw_prompting?: boolean | null;
tools?: Tool.Raw[] | null;
tool_results?: ToolResult.Raw[] | null;
force_single_step?: boolean | null;
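
What the schema addition buys, sketched from inside the SDK source tree: the zurg-style schema maps the camelCase client field to the snake_case wire key. The `jsonOrThrow` helper name is an assumption about Fern's generated core serialization.

// Illustrative only; these schemas are SDK internals.
import { ChatRequest } from "../../serialization/client/requests/ChatRequest";

const wire = ChatRequest.jsonOrThrow({
    message: "hi",
    rawPrompting: true,
});
// The camelCase field serializes to the snake_case wire key:
// wire.raw_prompting === true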
2 changes: 2 additions & 0 deletions src/serialization/client/requests/ChatStreamRequest.ts
@@ -41,6 +41,7 @@ export const ChatStreamRequest: core.serialization.Schema<
),
frequencyPenalty: core.serialization.property("frequency_penalty", core.serialization.number().optional()),
presencePenalty: core.serialization.property("presence_penalty", core.serialization.number().optional()),
rawPrompting: core.serialization.property("raw_prompting", core.serialization.boolean().optional()),
tools: core.serialization.list(Tool).optional(),
toolResults: core.serialization.property("tool_results", core.serialization.list(ToolResult).optional()),
forceSingleStep: core.serialization.property("force_single_step", core.serialization.boolean().optional()),
@@ -69,6 +70,7 @@ export declare namespace ChatStreamRequest {
stop_sequences?: string[] | null;
frequency_penalty?: number | null;
presence_penalty?: number | null;
raw_prompting?: boolean | null;
tools?: Tool.Raw[] | null;
tool_results?: ToolResult.Raw[] | null;
force_single_step?: boolean | null;
2 changes: 2 additions & 0 deletions src/serialization/resources/v2/client/requests/V2ChatRequest.ts
@@ -39,6 +39,7 @@ export const V2ChatRequest: core.serialization.Schema<serializers.V2ChatRequest.
logprobs: core.serialization.boolean().optional(),
toolChoice: core.serialization.property("tool_choice", V2ChatRequestToolChoice.optional()),
thinking: Thinking.optional(),
rawPrompting: core.serialization.property("raw_prompting", core.serialization.boolean().optional()),
});

export declare namespace V2ChatRequest {
@@ -62,5 +63,6 @@ export declare namespace V2ChatRequest {
logprobs?: boolean | null;
tool_choice?: V2ChatRequestToolChoice.Raw | null;
thinking?: Thinking.Raw | null;
raw_prompting?: boolean | null;
}
}
2 changes: 2 additions & 0 deletions src/serialization/resources/v2/client/requests/V2ChatStreamRequest.ts
@@ -41,6 +41,7 @@ export const V2ChatStreamRequest: core.serialization.Schema<
logprobs: core.serialization.boolean().optional(),
toolChoice: core.serialization.property("tool_choice", V2ChatStreamRequestToolChoice.optional()),
thinking: Thinking.optional(),
rawPrompting: core.serialization.property("raw_prompting", core.serialization.boolean().optional()),
});

export declare namespace V2ChatStreamRequest {
@@ -64,5 +65,6 @@ export declare namespace V2ChatStreamRequest {
logprobs?: boolean | null;
tool_choice?: V2ChatStreamRequestToolChoice.Raw | null;
thinking?: Thinking.Raw | null;
raw_prompting?: boolean | null;
}
}
6 changes: 6 additions & 0 deletions src/serialization/types/ChatMessageEndEventDelta.ts
@@ -5,16 +5,22 @@
import * as serializers from "../index";
import * as Cohere from "../../api/index";
import * as core from "../../core";
import { ChatFinishReason } from "./ChatFinishReason";
import { Usage } from "./Usage";

export const ChatMessageEndEventDelta: core.serialization.ObjectSchema<
serializers.ChatMessageEndEventDelta.Raw,
Cohere.ChatMessageEndEventDelta
> = core.serialization.object({
error: core.serialization.string().optional(),
finishReason: core.serialization.property("finish_reason", ChatFinishReason.optional()),
usage: Usage.optional(),
});

export declare namespace ChatMessageEndEventDelta {
export interface Raw {
error?: string | null;
finish_reason?: ChatFinishReason.Raw | null;
usage?: Usage.Raw | null;
}
}
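
And the reverse direction for the end-event delta: parsing a raw snake_case payload into the camelCase API type, again assuming the generated zurg helpers (`parseOrThrow` is an assumption about the SDK internals).

// Illustrative only; these schemas are SDK internals.
import { ChatMessageEndEventDelta } from "../../serialization/types/ChatMessageEndEventDelta";

const delta = ChatMessageEndEventDelta.parseOrThrow({
    finish_reason: "MAX_TOKENS",
    usage: { billed_units: { input_tokens: 12, output_tokens: 25 } },
});
// delta.finishReason === "MAX_TOKENS", delta.usage?.billedUnits is camelCase,
// and delta.error is simply absent when the payload omits it.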