diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 763462f..43fd5a7 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -9,9 +9,7 @@
"postCreateCommand": "yarn install",
"customizations": {
"vscode": {
- "extensions": [
- "esbenp.prettier-vscode"
- ]
+ "extensions": ["esbenp.prettier-vscode"]
}
}
}
diff --git a/.gitignore b/.gitignore
index d98d51a..2412bb7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,5 @@ dist
dist-deno
/*.tgz
.idea/
+.eslintcache
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ed9acd2..a1e0736 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.2.23-alpha.1"
+ ".": "0.4.0-alpha.1"
}
diff --git a/.stats.yml b/.stats.yml
index fa9edfc..60e64c3 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f252873ea1e1f38fd207331ef2621c511154d5be3f4076e59cc15754fc58eee4.yml
-openapi_spec_hash: 10cbb4337a06a9fdd7d08612dd6044c3
-config_hash: 0358112cc0f3d880b4d55debdbe1cfa3
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-35c6569e5e9fcc85084c9728eb7fc7c5908297fcc77043d621d25de3c850a990.yml
+openapi_spec_hash: 0f95bbeee16f3205d36ec34cfa62c711
+config_hash: ef275cc002a89629459fd73d0cf9cba9
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e40a318..4ca481e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,73 @@
# Changelog
+## 0.4.0-alpha.1 (2025-10-31)
+
+Full Changelog: [v0.2.23-alpha.1...v0.4.0-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.23-alpha.1...v0.4.0-alpha.1)
+
+### ⚠ BREAKING CHANGES
+
+* **api:** /v1/inspect only lists v1 apis by default
+* **api:** /v1/inspect only lists v1 apis by default
+* **api:** use input_schema instead of parameters for tools
+* **api:** fixes to remove deprecated inference resources
+
+### Features
+
+* **api:** Adding prompts API to stainless config ([5ab8d74](https://github.com/llamastack/llama-stack-client-typescript/commit/5ab8d7423f6a9c26453b36c9daee99d343993d4b))
+* **api:** expires_after changes for /files ([a0b0fb7](https://github.com/llamastack/llama-stack-client-typescript/commit/a0b0fb7aa74668f3f6996c178f9654723b8b0f22))
+* **api:** fix file batches SDK to list_files ([25a0f10](https://github.com/llamastack/llama-stack-client-typescript/commit/25a0f10cffa7de7f1457d65c97259911bc70ab0a))
+* **api:** fixes to remove deprecated inference resources ([367d775](https://github.com/llamastack/llama-stack-client-typescript/commit/367d775c3d5a2fd85bf138d2b175e91b7c185913))
+* **api:** fixes to URLs ([e4f7840](https://github.com/llamastack/llama-stack-client-typescript/commit/e4f78407f74f3ba7597de355c314e1932dd94761))
+* **api:** manual updates ([7d2e375](https://github.com/llamastack/llama-stack-client-typescript/commit/7d2e375bde7bd04ae58cc49fcd5ab7b134b25640))
+* **api:** manual updates ([0302d54](https://github.com/llamastack/llama-stack-client-typescript/commit/0302d54398d87127ab0e9221a8a92760123d235b))
+* **api:** manual updates ([98a596f](https://github.com/llamastack/llama-stack-client-typescript/commit/98a596f677fe2790e4b4765362aa19b6cff8b97e))
+* **api:** manual updates ([c6fb0b6](https://github.com/llamastack/llama-stack-client-typescript/commit/c6fb0b67d8f2e641c13836a17400e51df0b029f1))
+* **api:** manual updates??! ([4dda064](https://github.com/llamastack/llama-stack-client-typescript/commit/4dda06489f003860e138f396c253b40de01103b6))
+* **api:** move datasets to beta, vector_db -> vector_store ([f32c0be](https://github.com/llamastack/llama-stack-client-typescript/commit/f32c0becb1ec0d66129b7fcaa06de3323ee703da))
+* **api:** move post_training and eval under alpha namespace ([aec1d5f](https://github.com/llamastack/llama-stack-client-typescript/commit/aec1d5ff198473ba736bf543ad00c6626cab9b81))
+* **api:** moving { rerank, agents } to `client.alpha.` ([793e069](https://github.com/llamastack/llama-stack-client-typescript/commit/793e0694d75c2af4535bf991d5858cd1f21300b4))
+* **api:** removing openai/v1 ([b5432de](https://github.com/llamastack/llama-stack-client-typescript/commit/b5432de2ad56ff0d2fd5a5b8e1755b5237616b60))
+* **api:** SDKs for vector store file batches ([b0676c8](https://github.com/llamastack/llama-stack-client-typescript/commit/b0676c837bbd835276fea3fe12f435afdbb75ef7))
+* **api:** SDKs for vector store file batches apis ([88731bf](https://github.com/llamastack/llama-stack-client-typescript/commit/88731bfecd6f548ae79cbe2a1125620e488c42a3))
+* **api:** several updates including Conversations, Responses changes, etc. ([e0728d5](https://github.com/llamastack/llama-stack-client-typescript/commit/e0728d5dd59be8723d9f967d6164351eb05528d1))
+* **api:** sync ([7d85013](https://github.com/llamastack/llama-stack-client-typescript/commit/7d850139d1327a215312a82c98b3428ebc7e5f68))
+* **api:** tool api (input_schema, etc.) changes ([06f2bca](https://github.com/llamastack/llama-stack-client-typescript/commit/06f2bcaf0df2e5d462cbe2d9ef3704ab0cfe9248))
+* **api:** updates to vector_store, etc. ([19535c2](https://github.com/llamastack/llama-stack-client-typescript/commit/19535c27147bf6f6861b807d9eeee471b5625148))
+* **api:** updating post /v1/files to have correct multipart/form-data ([f1cf9d6](https://github.com/llamastack/llama-stack-client-typescript/commit/f1cf9d68b6b2569dfb5ea3e2d2c33eff1a832e47))
+* **api:** use input_schema instead of parameters for tools ([8910a12](https://github.com/llamastack/llama-stack-client-typescript/commit/8910a121146aeddcb8f400101e6a2232245097e0))
+* **api:** vector_db_id -> vector_store_id ([079d89d](https://github.com/llamastack/llama-stack-client-typescript/commit/079d89d6522cb4f2eed5e5a09962d94ad800e883))
+
+
+### Bug Fixes
+
+* **api:** another fix to capture correct responses.create() params ([6acae91](https://github.com/llamastack/llama-stack-client-typescript/commit/6acae910db289080e8f52864f1bdf6d7951d1c3b))
+* **api:** fix the ToolDefParam updates ([5cee3d6](https://github.com/llamastack/llama-stack-client-typescript/commit/5cee3d69650a4c827e12fc046c1d2ec3b2fa9126))
+* **client:** incorrect offset pagination check ([257285f](https://github.com/llamastack/llama-stack-client-typescript/commit/257285f33bb989c9040580dd24251d05f9657bb0))
+* fix stream event model reference ([a71b421](https://github.com/llamastack/llama-stack-client-typescript/commit/a71b421152a609e49e76d01c6e4dd46eb3dbfae0))
+
+
+### Chores
+
+* **api:** /v1/inspect only lists v1 apis by default ([ae3dc95](https://github.com/llamastack/llama-stack-client-typescript/commit/ae3dc95964c908d219b23d7166780eaab6003ef5))
+* **api:** /v1/inspect only lists v1 apis by default ([e30f51c](https://github.com/llamastack/llama-stack-client-typescript/commit/e30f51c704c39129092255c040bbf5ad90ed0b07))
+* extract some types in mcp docs ([dcc7bb8](https://github.com/llamastack/llama-stack-client-typescript/commit/dcc7bb8b4d940982c2e9c6d1a541636e99fdc5ff))
+* fix readme example ([402f930](https://github.com/llamastack/llama-stack-client-typescript/commit/402f9301d033bb230c9714104fbfa554f3f7cd8f))
+* fix readme examples ([4d5517c](https://github.com/llamastack/llama-stack-client-typescript/commit/4d5517c2b9af2eb6994f5e4b2c033c95d268fb5c))
+* **internal:** codegen related update ([252e0a2](https://github.com/llamastack/llama-stack-client-typescript/commit/252e0a2a38bd8aedab91b401c440a9b10c056cec))
+* **internal:** codegen related update ([34da720](https://github.com/llamastack/llama-stack-client-typescript/commit/34da720c34c35dafb38775243d28dfbdce2497db))
+* **internal:** fix incremental formatting in some cases ([c5c8292](https://github.com/llamastack/llama-stack-client-typescript/commit/c5c8292b631c678efff5498bbab9f5a43bee50b6))
+* **internal:** use npm pack for build uploads ([a246793](https://github.com/llamastack/llama-stack-client-typescript/commit/a24679300cff93fea8ad4bc85e549ecc88198d58))
+
+
+### Documentation
+
+* update examples ([17b9eb3](https://github.com/llamastack/llama-stack-client-typescript/commit/17b9eb3c40957b63d2a71f7fc21944abcc720d80))
+
+
+### Build System
+
+* Bump version to 0.2.23 ([16e05ed](https://github.com/llamastack/llama-stack-client-typescript/commit/16e05ed9798233375e19098992632d223c3f5d8d))
+
## 0.2.23-alpha.1 (2025-09-26)
Full Changelog: [v0.2.19-alpha.1...v0.2.23-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.19-alpha.1...v0.2.23-alpha.1)
diff --git a/README.md b/README.md
index af2743d..d9f1b2f 100644
--- a/README.md
+++ b/README.md
@@ -41,13 +41,13 @@ import LlamaStackClient from 'llama-stack-client';
const client = new LlamaStackClient();
-const stream = await client.inference.chatCompletion({
+const stream = await client.chat.completions.create({
messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
+ model: 'model',
stream: true,
});
-for await (const chatCompletionResponseStreamChunk of stream) {
- console.log(chatCompletionResponseStreamChunk.completion_message);
+for await (const chatCompletionChunk of stream) {
+ console.log(chatCompletionChunk);
}
```
@@ -64,11 +64,11 @@ import LlamaStackClient from 'llama-stack-client';
const client = new LlamaStackClient();
-const params: LlamaStackClient.InferenceChatCompletionParams = {
+const params: LlamaStackClient.Chat.CompletionCreateParams = {
messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
+ model: 'model',
};
-const chatCompletionResponse: LlamaStackClient.ChatCompletionResponse = await client.inference.chatCompletion(
+const completion: LlamaStackClient.Chat.CompletionCreateResponse = await client.chat.completions.create(
params,
);
```
@@ -113,8 +113,8 @@ a subclass of `APIError` will be thrown:
```ts
-const chatCompletionResponse = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const completion = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.catch(async (err) => {
if (err instanceof LlamaStackClient.APIError) {
console.log(err.status); // 400
@@ -155,7 +155,7 @@ const client = new LlamaStackClient({
});
// Or, configure per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
maxRetries: 5,
});
```
@@ -172,7 +172,7 @@ const client = new LlamaStackClient({
});
// Override per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
timeout: 5 * 1000,
});
```
@@ -193,17 +193,17 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
```ts
const client = new LlamaStackClient();
-const response = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const response = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.asResponse();
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object
-const { data: chatCompletionResponse, response: raw } = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const { data: completion, response: raw } = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.withResponse();
console.log(raw.headers.get('X-My-Header'));
-console.log(chatCompletionResponse.completion_message);
+console.log(completion);
```
### Making custom/undocumented requests
@@ -307,8 +307,8 @@ const client = new LlamaStackClient({
});
// Override per-request:
-await client.inference.chatCompletion(
- { messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' },
+await client.chat.completions.create(
+ { messages: [{ content: 'string', role: 'user' }], model: 'model' },
{
httpAgent: new http.Agent({ keepAlive: false }),
},
diff --git a/api.md b/api.md
index 01d88a5..dd30175 100644
--- a/api.md
+++ b/api.md
@@ -3,18 +3,13 @@
Types:
- AgentConfig
-- BatchCompletion
-- ChatCompletionResponse
- CompletionMessage
-- ContentDelta
- Document
- InterleavedContent
- InterleavedContentItem
- Message
-- Metric
- ParamType
- QueryConfig
-- QueryGeneratorConfig
- QueryResult
- ResponseFormat
- SafetyViolation
@@ -22,7 +17,6 @@ Types:
- ScoringResult
- SystemMessage
- ToolCall
-- ToolParamDefinition
- ToolResponseMessage
- UserMessage
@@ -45,14 +39,12 @@ Methods:
Types:
-- ListToolsResponse
-- Tool
- ToolListResponse
Methods:
- client.tools.list({ ...params }) -> ToolListResponse
-- client.tools.get(toolName) -> Tool
+- client.tools.get(toolName) -> ToolDef
# ToolRuntime
@@ -85,10 +77,10 @@ Types:
Methods:
-- client.responses.create({ ...params }) -> ResponseObject
-- client.responses.retrieve(responseId) -> ResponseObject
-- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
-- client.responses.delete(responseId) -> ResponseDeleteResponse
+- client.responses.create({ ...params }) -> ResponseObject
+- client.responses.retrieve(responseId) -> ResponseObject
+- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
+- client.responses.delete(responseId) -> ResponseDeleteResponse
## InputItems
@@ -98,110 +90,58 @@ Types:
Methods:
-- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
+- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
-# Agents
+# Prompts
Types:
-- InferenceStep
-- MemoryRetrievalStep
-- ShieldCallStep
-- ToolExecutionStep
-- ToolResponse
-- AgentCreateResponse
-- AgentRetrieveResponse
-- AgentListResponse
+- ListPromptsResponse
+- Prompt
+- PromptListResponse
Methods:
-- client.agents.create({ ...params }) -> AgentCreateResponse
-- client.agents.retrieve(agentId) -> AgentRetrieveResponse
-- client.agents.list({ ...params }) -> AgentListResponse
-- client.agents.delete(agentId) -> void
+- client.prompts.create({ ...params }) -> Prompt
+- client.prompts.retrieve(promptId, { ...params }) -> Prompt
+- client.prompts.update(promptId, { ...params }) -> Prompt
+- client.prompts.list() -> PromptListResponse
+- client.prompts.delete(promptId) -> void
+- client.prompts.setDefaultVersion(promptId, { ...params }) -> Prompt
-## Session
-
-Types:
-
-- Session
-- SessionCreateResponse
-- SessionListResponse
-
-Methods:
-
-- client.agents.session.create(agentId, { ...params }) -> SessionCreateResponse
-- client.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session
-- client.agents.session.list(agentId, { ...params }) -> SessionListResponse
-- client.agents.session.delete(agentId, sessionId) -> void
-
-## Steps
-
-Types:
-
-- StepRetrieveResponse
+## Versions
Methods:
-- client.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse
+- client.prompts.versions.list(promptId) -> PromptListResponse
-## Turn
+# Conversations
Types:
-- AgentTurnResponseStreamChunk
-- Turn
-- TurnResponseEvent
-- TurnResponseEventPayload
+- ConversationObject
+- ConversationDeleteResponse
Methods:
-- client.agents.turn.create(agentId, sessionId, { ...params }) -> Turn
-- client.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn
-- client.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn
+- client.conversations.create({ ...params }) -> ConversationObject
+- client.conversations.retrieve(conversationId) -> ConversationObject
+- client.conversations.update(conversationId, { ...params }) -> ConversationObject
+- client.conversations.delete(conversationId) -> ConversationDeleteResponse
-# Datasets
+## Items
Types:
-- ListDatasetsResponse
-- DatasetRetrieveResponse
-- DatasetListResponse
-- DatasetIterrowsResponse
-- DatasetRegisterResponse
+- ItemCreateResponse
+- ItemListResponse
+- ItemGetResponse
Methods:
-- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse
-- client.datasets.list() -> DatasetListResponse
-- client.datasets.appendrows(datasetId, { ...params }) -> void
-- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse
-- client.datasets.register({ ...params }) -> DatasetRegisterResponse
-- client.datasets.unregister(datasetId) -> void
-
-# Eval
-
-Types:
-
-- BenchmarkConfig
-- EvalCandidate
-- EvaluateResponse
-- Job
-
-Methods:
-
-- client.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.runEval(benchmarkId, { ...params }) -> Job
-- client.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job
-
-## Jobs
-
-Methods:
-
-- client.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse
-- client.eval.jobs.cancel(benchmarkId, jobId) -> void
-- client.eval.jobs.status(benchmarkId, jobId) -> Job
+- client.conversations.items.create(conversationId, { ...params }) -> ItemCreateResponse
+- client.conversations.items.list(conversationId, { ...params }) -> ItemListResponsesOpenAICursorPage
+- client.conversations.items.get(conversationId, itemId) -> ItemGetResponse
# Inspect
@@ -217,26 +157,6 @@ Methods:
- client.inspect.health() -> HealthInfo
- client.inspect.version() -> VersionInfo
-# Inference
-
-Types:
-
-- ChatCompletionResponseStreamChunk
-- CompletionResponse
-- EmbeddingsResponse
-- TokenLogProbs
-- InferenceBatchChatCompletionResponse
-- InferenceRerankResponse
-
-Methods:
-
-- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse
-- client.inference.batchCompletion({ ...params }) -> BatchCompletion
-- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse
-- client.inference.completion({ ...params }) -> CompletionResponse
-- client.inference.embeddings({ ...params }) -> EmbeddingsResponse
-- client.inference.rerank({ ...params }) -> InferenceRerankResponse
-
# Embeddings
Types:
@@ -245,7 +165,7 @@ Types:
Methods:
-- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
+- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
# Chat
@@ -263,9 +183,9 @@ Types:
Methods:
-- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
-- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
-- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
+- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
+- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
+- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
# Completions
@@ -275,7 +195,7 @@ Types:
Methods:
-- client.completions.create({ ...params }) -> CompletionCreateResponse
+- client.completions.create({ ...params }) -> CompletionCreateResponse
# VectorIo
@@ -288,22 +208,6 @@ Methods:
- client.vectorIo.insert({ ...params }) -> void
- client.vectorIo.query({ ...params }) -> QueryChunksResponse
-# VectorDBs
-
-Types:
-
-- ListVectorDBsResponse
-- VectorDBRetrieveResponse
-- VectorDBListResponse
-- VectorDBRegisterResponse
-
-Methods:
-
-- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse
-- client.vectorDBs.list() -> VectorDBListResponse
-- client.vectorDBs.register({ ...params }) -> VectorDBRegisterResponse
-- client.vectorDBs.unregister(vectorDBId) -> void
-
# VectorStores
Types:
@@ -315,12 +219,12 @@ Types:
Methods:
-- client.vectorStores.create({ ...params }) -> VectorStore
-- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
-- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
-- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
-- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
-- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
+- client.vectorStores.create({ ...params }) -> VectorStore
+- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
+- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
+- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
+- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
+- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
## Files
@@ -332,12 +236,26 @@ Types:
Methods:
-- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
-- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
-- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
-- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
+- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
+- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
+- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+
+## FileBatches
+
+Types:
+
+- ListVectorStoreFilesInBatchResponse
+- VectorStoreFileBatches
+
+Methods:
+
+- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesOpenAICursorPage
# Models
@@ -356,41 +274,9 @@ Methods:
## OpenAI
-Types:
-
-- OpenAIListResponse
-
-Methods:
-
-- client.models.openai.list() -> OpenAIListResponse
-
-# PostTraining
-
-Types:
-
-- AlgorithmConfig
-- ListPostTrainingJobsResponse
-- PostTrainingJob
-
-Methods:
-
-- client.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob
-- client.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob
-
-## Job
-
-Types:
-
-- JobListResponse
-- JobArtifactsResponse
-- JobStatusResponse
-
Methods:
-- client.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data>
-- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse
-- client.postTraining.job.cancel({ ...params }) -> void
-- client.postTraining.job.status({ ...params }) -> JobStatusResponse
+- client.models.openai.list() -> ModelListResponse
# Providers
@@ -423,7 +309,7 @@ Types:
Methods:
-- client.moderations.create({ ...params }) -> CreateResponse
+- client.moderations.create({ ...params }) -> CreateResponse
# Safety
@@ -460,32 +346,6 @@ Methods:
- client.syntheticDataGeneration.generate({ ...params }) -> SyntheticDataGenerationResponse
-# Telemetry
-
-Types:
-
-- Event
-- QueryCondition
-- QuerySpansResponse
-- SpanWithStatus
-- Trace
-- TelemetryGetSpanResponse
-- TelemetryGetSpanTreeResponse
-- TelemetryQueryMetricsResponse
-- TelemetryQuerySpansResponse
-- TelemetryQueryTracesResponse
-
-Methods:
-
-- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse
-- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse
-- client.telemetry.getTrace(traceId) -> Trace
-- client.telemetry.logEvent({ ...params }) -> void
-- client.telemetry.queryMetrics(metricName, { ...params }) -> TelemetryQueryMetricsResponse
-- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse
-- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse
-- client.telemetry.saveSpansToDataset({ ...params }) -> void
-
# Scoring
Types:
@@ -513,33 +373,176 @@ Methods:
- client.scoringFunctions.list() -> ScoringFunctionListResponse
- client.scoringFunctions.register({ ...params }) -> void
-# Benchmarks
+# Files
Types:
-- Benchmark
-- ListBenchmarksResponse
-- BenchmarkListResponse
+- DeleteFileResponse
+- File
+- ListFilesResponse
+- FileContentResponse
Methods:
-- client.benchmarks.retrieve(benchmarkId) -> Benchmark
-- client.benchmarks.list() -> BenchmarkListResponse
-- client.benchmarks.register({ ...params }) -> void
+- client.files.create({ ...params }) -> File
+- client.files.retrieve(fileId) -> File
+- client.files.list({ ...params }) -> FilesOpenAICursorPage
+- client.files.delete(fileId) -> DeleteFileResponse
+- client.files.content(fileId) -> unknown
-# Files
+# Alpha
+
+## Inference
Types:
-- DeleteFileResponse
-- File
-- ListFilesResponse
-- FileContentResponse
+- InferenceRerankResponse
+
+Methods:
+
+- client.alpha.inference.rerank({ ...params }) -> InferenceRerankResponse
+
+## PostTraining
+
+Types:
+
+- AlgorithmConfig
+- ListPostTrainingJobsResponse
+- PostTrainingJob
+
+Methods:
+
+- client.alpha.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob
+- client.alpha.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob
+
+### Job
+
+Types:
+
+- JobListResponse
+- JobArtifactsResponse
+- JobStatusResponse
+
+Methods:
+
+- client.alpha.postTraining.job.list() -> JobListResponse
+- client.alpha.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse
+- client.alpha.postTraining.job.cancel({ ...params }) -> void
+- client.alpha.postTraining.job.status({ ...params }) -> JobStatusResponse
+
+## Benchmarks
+
+Types:
+
+- Benchmark
+- ListBenchmarksResponse
+- BenchmarkListResponse
+
+Methods:
+
+- client.alpha.benchmarks.retrieve(benchmarkId) -> Benchmark
+- client.alpha.benchmarks.list() -> BenchmarkListResponse
+- client.alpha.benchmarks.register({ ...params }) -> void
+
+## Eval
+
+Types:
+
+- BenchmarkConfig
+- EvaluateResponse
+- Job
+
+Methods:
+
+- client.alpha.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse
+- client.alpha.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse
+- client.alpha.eval.runEval(benchmarkId, { ...params }) -> Job
+- client.alpha.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job
+
+### Jobs
+
+Methods:
+
+- client.alpha.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse
+- client.alpha.eval.jobs.cancel(benchmarkId, jobId) -> void
+- client.alpha.eval.jobs.status(benchmarkId, jobId) -> Job
+
+## Agents
+
+Types:
+
+- InferenceStep
+- MemoryRetrievalStep
+- ShieldCallStep
+- ToolExecutionStep
+- ToolResponse
+- AgentCreateResponse
+- AgentRetrieveResponse
+- AgentListResponse
+
+Methods:
+
+- client.alpha.agents.create({ ...params }) -> AgentCreateResponse
+- client.alpha.agents.retrieve(agentId) -> AgentRetrieveResponse
+- client.alpha.agents.list({ ...params }) -> AgentListResponse
+- client.alpha.agents.delete(agentId) -> void
+
+### Session
+
+Types:
+
+- Session
+- SessionCreateResponse
+- SessionListResponse
+
+Methods:
+
+- client.alpha.agents.session.create(agentId, { ...params }) -> SessionCreateResponse
+- client.alpha.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session
+- client.alpha.agents.session.list(agentId, { ...params }) -> SessionListResponse
+- client.alpha.agents.session.delete(agentId, sessionId) -> void
+
+### Steps
+
+Types:
+
+- StepRetrieveResponse
+
+Methods:
+
+- client.alpha.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse
+
+### Turn
+
+Types:
+
+- AgentTurnResponseStreamChunk
+- Turn
+- TurnResponseEvent
+
+Methods:
+
+- client.alpha.agents.turn.create(agentId, sessionId, { ...params }) -> Turn
+- client.alpha.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn
+- client.alpha.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn
+
+# Beta
+
+## Datasets
+
+Types:
+
+- ListDatasetsResponse
+- DatasetRetrieveResponse
+- DatasetListResponse
+- DatasetIterrowsResponse
+- DatasetRegisterResponse
Methods:
-- client.files.create({ ...params }) -> File
-- client.files.retrieve(fileId) -> File
-- client.files.list({ ...params }) -> FilesOpenAICursorPage
-- client.files.delete(fileId) -> DeleteFileResponse
-- client.files.content(fileId) -> unknown
+- client.beta.datasets.retrieve(datasetId) -> DatasetRetrieveResponse
+- client.beta.datasets.list() -> DatasetListResponse
+- client.beta.datasets.appendrows(datasetId, { ...params }) -> void
+- client.beta.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse
+- client.beta.datasets.register({ ...params }) -> DatasetRegisterResponse
+- client.beta.datasets.unregister(datasetId) -> void
diff --git a/package.json b/package.json
index 60f74e7..9aeedec 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "llama-stack-client",
- "version": "0.3.0",
+ "version": "0.4.0-alpha.1",
"description": "The official TypeScript library for the Llama Stack Client API",
"author": "Llama Stack Client ",
"types": "dist/index.d.ts",
diff --git a/release-please-config.json b/release-please-config.json
index 624ed99..1ebd0bd 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -60,8 +60,5 @@
}
],
"release-type": "node",
- "extra-files": [
- "src/version.ts",
- "README.md"
- ]
+ "extra-files": ["src/version.ts", "README.md"]
}
diff --git a/scripts/fast-format b/scripts/fast-format
index 03fb1a3..8a8e9d5 100755
--- a/scripts/fast-format
+++ b/scripts/fast-format
@@ -35,6 +35,6 @@ echo "==> Running prettier --write"
PRETTIER_FILES="$(grep '\.\(js\|json\)$' "$FILE_LIST" || true)"
if ! [ -z "$PRETTIER_FILES" ]; then
echo "$PRETTIER_FILES" | xargs ./node_modules/.bin/prettier \
- --write --cache --cache-strategy metadata \
+ --write --cache --cache-strategy metadata --no-error-on-unmatched-pattern \
'!**/dist' '!**/*.ts' '!**/*.mts' '!**/*.cts' '!**/*.js' '!**/*.mjs' '!**/*.cjs'
fi
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index 4e16419..6738930 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -18,9 +18,11 @@ if [[ "$SIGNED_URL" == "null" ]]; then
exit 1
fi
-UPLOAD_RESPONSE=$(tar "${BASE_PATH:+-C$BASE_PATH}" -cz "${ARTIFACT_PATH:-dist}" | curl -v -X PUT \
+TARBALL=$(cd dist && npm pack --silent)
+
+UPLOAD_RESPONSE=$(curl -v -X PUT \
-H "Content-Type: application/gzip" \
- --data-binary @- "$SIGNED_URL" 2>&1)
+ --data-binary "@dist/$TARBALL" "$SIGNED_URL" 2>&1)
if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
diff --git a/src/index.ts b/src/index.ts
index 14be9b7..85b3f74 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -19,13 +19,6 @@ import {
} from './pagination';
import * as Uploads from './uploads';
import * as API from './resources/index';
-import {
- Benchmark,
- BenchmarkListResponse,
- BenchmarkRegisterParams,
- Benchmarks,
- ListBenchmarksResponse,
-} from './resources/benchmarks';
import {
CompletionCreateParams,
CompletionCreateParamsNonStreaming,
@@ -33,17 +26,6 @@ import {
CompletionCreateResponse,
Completions,
} from './resources/completions';
-import {
- DatasetAppendrowsParams,
- DatasetIterrowsParams,
- DatasetIterrowsResponse,
- DatasetListResponse,
- DatasetRegisterParams,
- DatasetRegisterResponse,
- DatasetRetrieveResponse,
- Datasets,
- ListDatasetsResponse,
-} from './resources/datasets';
import { CreateEmbeddingsResponse, EmbeddingCreateParams, Embeddings } from './resources/embeddings';
import {
DeleteFileResponse,
@@ -55,25 +37,6 @@ import {
FilesOpenAICursorPage,
ListFilesResponse,
} from './resources/files';
-import {
- ChatCompletionResponseStreamChunk,
- CompletionResponse,
- EmbeddingsResponse,
- Inference,
- InferenceBatchChatCompletionParams,
- InferenceBatchChatCompletionResponse,
- InferenceBatchCompletionParams,
- InferenceChatCompletionParams,
- InferenceChatCompletionParamsNonStreaming,
- InferenceChatCompletionParamsStreaming,
- InferenceCompletionParams,
- InferenceCompletionParamsNonStreaming,
- InferenceCompletionParamsStreaming,
- InferenceEmbeddingsParams,
- InferenceRerankParams,
- InferenceRerankResponse,
- TokenLogProbs,
-} from './resources/inference';
import { HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect';
import { CreateResponse, ModerationCreateParams, Moderations } from './resources/moderations';
import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers';
@@ -106,25 +69,6 @@ import {
SyntheticDataGenerationGenerateParams,
SyntheticDataGenerationResponse,
} from './resources/synthetic-data-generation';
-import {
- Event,
- QueryCondition,
- QuerySpansResponse,
- SpanWithStatus,
- Telemetry,
- TelemetryGetSpanResponse,
- TelemetryGetSpanTreeParams,
- TelemetryGetSpanTreeResponse,
- TelemetryLogEventParams,
- TelemetryQueryMetricsParams,
- TelemetryQueryMetricsResponse,
- TelemetryQuerySpansParams,
- TelemetryQuerySpansResponse,
- TelemetryQueryTracesParams,
- TelemetryQueryTracesResponse,
- TelemetrySaveSpansToDatasetParams,
- Trace,
-} from './resources/telemetry';
import {
ListToolGroupsResponse,
ToolGroup,
@@ -132,46 +76,23 @@ import {
ToolgroupRegisterParams,
Toolgroups,
} from './resources/toolgroups';
-import { ListToolsResponse, Tool, ToolListParams, ToolListResponse, Tools } from './resources/tools';
-import {
- ListVectorDBsResponse,
- VectorDBListResponse,
- VectorDBRegisterParams,
- VectorDBRegisterResponse,
- VectorDBRetrieveResponse,
- VectorDBs,
-} from './resources/vector-dbs';
+import { ToolListParams, ToolListResponse, Tools } from './resources/tools';
import {
QueryChunksResponse,
VectorIo,
VectorIoInsertParams,
VectorIoQueryParams,
} from './resources/vector-io';
-import {
- AgentCreateParams,
- AgentCreateResponse,
- AgentListParams,
- AgentListResponse,
- AgentRetrieveResponse,
- Agents,
- InferenceStep,
- MemoryRetrievalStep,
- ShieldCallStep,
- ToolExecutionStep,
- ToolResponse,
-} from './resources/agents/agents';
+import { Alpha } from './resources/alpha/alpha';
+import { Beta } from './resources/beta/beta';
import { Chat, ChatCompletionChunk } from './resources/chat/chat';
import {
- BenchmarkConfig,
- Eval,
- EvalCandidate,
- EvalEvaluateRowsAlphaParams,
- EvalEvaluateRowsParams,
- EvalRunEvalAlphaParams,
- EvalRunEvalParams,
- EvaluateResponse,
- Job,
-} from './resources/eval/eval';
+ ConversationCreateParams,
+ ConversationDeleteResponse,
+ ConversationObject,
+ ConversationUpdateParams,
+ Conversations,
+} from './resources/conversations/conversations';
import {
ListModelsResponse,
Model,
@@ -180,13 +101,15 @@ import {
Models,
} from './resources/models/models';
import {
- AlgorithmConfig,
- ListPostTrainingJobsResponse,
- PostTraining,
- PostTrainingJob,
- PostTrainingPreferenceOptimizeParams,
- PostTrainingSupervisedFineTuneParams,
-} from './resources/post-training/post-training';
+ ListPromptsResponse,
+ Prompt,
+ PromptCreateParams,
+ PromptListResponse,
+ PromptRetrieveParams,
+ PromptSetDefaultVersionParams,
+ PromptUpdateParams,
+ Prompts,
+} from './resources/prompts/prompts';
import {
ResponseCreateParams,
ResponseCreateParamsNonStreaming,
@@ -334,30 +257,26 @@ export class LlamaStackClient extends Core.APIClient {
tools: API.Tools = new API.Tools(this);
toolRuntime: API.ToolRuntime = new API.ToolRuntime(this);
responses: API.Responses = new API.Responses(this);
- agents: API.Agents = new API.Agents(this);
- datasets: API.Datasets = new API.Datasets(this);
- eval: API.Eval = new API.Eval(this);
+ prompts: API.Prompts = new API.Prompts(this);
+ conversations: API.Conversations = new API.Conversations(this);
inspect: API.Inspect = new API.Inspect(this);
- inference: API.Inference = new API.Inference(this);
embeddings: API.Embeddings = new API.Embeddings(this);
chat: API.Chat = new API.Chat(this);
completions: API.Completions = new API.Completions(this);
vectorIo: API.VectorIo = new API.VectorIo(this);
- vectorDBs: API.VectorDBs = new API.VectorDBs(this);
vectorStores: API.VectorStores = new API.VectorStores(this);
models: API.Models = new API.Models(this);
- postTraining: API.PostTraining = new API.PostTraining(this);
providers: API.Providers = new API.Providers(this);
routes: API.Routes = new API.Routes(this);
moderations: API.Moderations = new API.Moderations(this);
safety: API.Safety = new API.Safety(this);
shields: API.Shields = new API.Shields(this);
syntheticDataGeneration: API.SyntheticDataGeneration = new API.SyntheticDataGeneration(this);
- telemetry: API.Telemetry = new API.Telemetry(this);
scoring: API.Scoring = new API.Scoring(this);
scoringFunctions: API.ScoringFunctions = new API.ScoringFunctions(this);
- benchmarks: API.Benchmarks = new API.Benchmarks(this);
files: API.Files = new API.Files(this);
+ alpha: API.Alpha = new API.Alpha(this);
+ beta: API.Beta = new API.Beta(this);
/**
* Check whether the base URL is set to its default.
@@ -414,32 +333,28 @@ LlamaStackClient.Tools = Tools;
LlamaStackClient.ToolRuntime = ToolRuntime;
LlamaStackClient.Responses = Responses;
LlamaStackClient.ResponseListResponsesOpenAICursorPage = ResponseListResponsesOpenAICursorPage;
-LlamaStackClient.Agents = Agents;
-LlamaStackClient.Datasets = Datasets;
-LlamaStackClient.Eval = Eval;
+LlamaStackClient.Prompts = Prompts;
+LlamaStackClient.Conversations = Conversations;
LlamaStackClient.Inspect = Inspect;
-LlamaStackClient.Inference = Inference;
LlamaStackClient.Embeddings = Embeddings;
LlamaStackClient.Chat = Chat;
LlamaStackClient.Completions = Completions;
LlamaStackClient.VectorIo = VectorIo;
-LlamaStackClient.VectorDBs = VectorDBs;
LlamaStackClient.VectorStores = VectorStores;
LlamaStackClient.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage;
LlamaStackClient.Models = Models;
-LlamaStackClient.PostTraining = PostTraining;
LlamaStackClient.Providers = Providers;
LlamaStackClient.Routes = Routes;
LlamaStackClient.Moderations = Moderations;
LlamaStackClient.Safety = Safety;
LlamaStackClient.Shields = Shields;
LlamaStackClient.SyntheticDataGeneration = SyntheticDataGeneration;
-LlamaStackClient.Telemetry = Telemetry;
LlamaStackClient.Scoring = Scoring;
LlamaStackClient.ScoringFunctions = ScoringFunctions;
-LlamaStackClient.Benchmarks = Benchmarks;
LlamaStackClient.Files = Files;
LlamaStackClient.FilesOpenAICursorPage = FilesOpenAICursorPage;
+LlamaStackClient.Alpha = Alpha;
+LlamaStackClient.Beta = Beta;
export declare namespace LlamaStackClient {
export type RequestOptions = Core.RequestOptions;
@@ -464,13 +379,7 @@ export declare namespace LlamaStackClient {
type ToolgroupRegisterParams as ToolgroupRegisterParams,
};
- export {
- Tools as Tools,
- type ListToolsResponse as ListToolsResponse,
- type Tool as Tool,
- type ToolListResponse as ToolListResponse,
- type ToolListParams as ToolListParams,
- };
+ export { Tools as Tools, type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams };
export {
ToolRuntime as ToolRuntime,
@@ -495,41 +404,22 @@ export declare namespace LlamaStackClient {
};
export {
- Agents as Agents,
- type InferenceStep as InferenceStep,
- type MemoryRetrievalStep as MemoryRetrievalStep,
- type ShieldCallStep as ShieldCallStep,
- type ToolExecutionStep as ToolExecutionStep,
- type ToolResponse as ToolResponse,
- type AgentCreateResponse as AgentCreateResponse,
- type AgentRetrieveResponse as AgentRetrieveResponse,
- type AgentListResponse as AgentListResponse,
- type AgentCreateParams as AgentCreateParams,
- type AgentListParams as AgentListParams,
- };
-
- export {
- Datasets as Datasets,
- type ListDatasetsResponse as ListDatasetsResponse,
- type DatasetRetrieveResponse as DatasetRetrieveResponse,
- type DatasetListResponse as DatasetListResponse,
- type DatasetIterrowsResponse as DatasetIterrowsResponse,
- type DatasetRegisterResponse as DatasetRegisterResponse,
- type DatasetAppendrowsParams as DatasetAppendrowsParams,
- type DatasetIterrowsParams as DatasetIterrowsParams,
- type DatasetRegisterParams as DatasetRegisterParams,
+ Prompts as Prompts,
+ type ListPromptsResponse as ListPromptsResponse,
+ type Prompt as Prompt,
+ type PromptListResponse as PromptListResponse,
+ type PromptCreateParams as PromptCreateParams,
+ type PromptRetrieveParams as PromptRetrieveParams,
+ type PromptUpdateParams as PromptUpdateParams,
+ type PromptSetDefaultVersionParams as PromptSetDefaultVersionParams,
};
export {
- Eval as Eval,
- type BenchmarkConfig as BenchmarkConfig,
- type EvalCandidate as EvalCandidate,
- type EvaluateResponse as EvaluateResponse,
- type Job as Job,
- type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams as EvalRunEvalParams,
- type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
+ Conversations as Conversations,
+ type ConversationObject as ConversationObject,
+ type ConversationDeleteResponse as ConversationDeleteResponse,
+ type ConversationCreateParams as ConversationCreateParams,
+ type ConversationUpdateParams as ConversationUpdateParams,
};
export {
@@ -540,26 +430,6 @@ export declare namespace LlamaStackClient {
type VersionInfo as VersionInfo,
};
- export {
- Inference as Inference,
- type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk,
- type CompletionResponse as CompletionResponse,
- type EmbeddingsResponse as EmbeddingsResponse,
- type TokenLogProbs as TokenLogProbs,
- type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse as InferenceRerankResponse,
- type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams as InferenceBatchCompletionParams,
- type InferenceChatCompletionParams as InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams as InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams as InferenceEmbeddingsParams,
- type InferenceRerankParams as InferenceRerankParams,
- };
-
export {
Embeddings as Embeddings,
type CreateEmbeddingsResponse as CreateEmbeddingsResponse,
@@ -583,15 +453,6 @@ export declare namespace LlamaStackClient {
type VectorIoQueryParams as VectorIoQueryParams,
};
- export {
- VectorDBs as VectorDBs,
- type ListVectorDBsResponse as ListVectorDBsResponse,
- type VectorDBRetrieveResponse as VectorDBRetrieveResponse,
- type VectorDBListResponse as VectorDBListResponse,
- type VectorDBRegisterResponse as VectorDBRegisterResponse,
- type VectorDBRegisterParams as VectorDBRegisterParams,
- };
-
export {
VectorStores as VectorStores,
type ListVectorStoresResponse as ListVectorStoresResponse,
@@ -613,15 +474,6 @@ export declare namespace LlamaStackClient {
type ModelRegisterParams as ModelRegisterParams,
};
- export {
- PostTraining as PostTraining,
- type AlgorithmConfig as AlgorithmConfig,
- type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse,
- type PostTrainingJob as PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
- };
-
export {
Providers as Providers,
type ListProvidersResponse as ListProvidersResponse,
@@ -660,26 +512,6 @@ export declare namespace LlamaStackClient {
type SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams,
};
- export {
- Telemetry as Telemetry,
- type Event as Event,
- type QueryCondition as QueryCondition,
- type QuerySpansResponse as QuerySpansResponse,
- type SpanWithStatus as SpanWithStatus,
- type Trace as Trace,
- type TelemetryGetSpanResponse as TelemetryGetSpanResponse,
- type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse,
- type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse,
- type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse,
- type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse,
- type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams as TelemetryLogEventParams,
- type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams,
- type TelemetryQuerySpansParams as TelemetryQuerySpansParams,
- type TelemetryQueryTracesParams as TelemetryQueryTracesParams,
- type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams,
- };
-
export {
Scoring as Scoring,
type ScoringScoreResponse as ScoringScoreResponse,
@@ -697,14 +529,6 @@ export declare namespace LlamaStackClient {
type ScoringFunctionRegisterParams as ScoringFunctionRegisterParams,
};
- export {
- Benchmarks as Benchmarks,
- type Benchmark as Benchmark,
- type ListBenchmarksResponse as ListBenchmarksResponse,
- type BenchmarkListResponse as BenchmarkListResponse,
- type BenchmarkRegisterParams as BenchmarkRegisterParams,
- };
-
export {
Files as Files,
type DeleteFileResponse as DeleteFileResponse,
@@ -716,19 +540,18 @@ export declare namespace LlamaStackClient {
type FileListParams as FileListParams,
};
+ export { Alpha as Alpha };
+
+ export { Beta as Beta };
+
export type AgentConfig = API.AgentConfig;
- export type BatchCompletion = API.BatchCompletion;
- export type ChatCompletionResponse = API.ChatCompletionResponse;
export type CompletionMessage = API.CompletionMessage;
- export type ContentDelta = API.ContentDelta;
export type Document = API.Document;
export type InterleavedContent = API.InterleavedContent;
export type InterleavedContentItem = API.InterleavedContentItem;
export type Message = API.Message;
- export type Metric = API.Metric;
export type ParamType = API.ParamType;
export type QueryConfig = API.QueryConfig;
- export type QueryGeneratorConfig = API.QueryGeneratorConfig;
export type QueryResult = API.QueryResult;
export type ResponseFormat = API.ResponseFormat;
export type SafetyViolation = API.SafetyViolation;
@@ -736,7 +559,6 @@ export declare namespace LlamaStackClient {
export type ScoringResult = API.ScoringResult;
export type SystemMessage = API.SystemMessage;
export type ToolCall = API.ToolCall;
- export type ToolParamDefinition = API.ToolParamDefinition;
export type ToolResponseMessage = API.ToolResponseMessage;
export type UserMessage = API.UserMessage;
}
diff --git a/src/pagination.ts b/src/pagination.ts
index 11b6313..b68497e 100644
--- a/src/pagination.ts
+++ b/src/pagination.ts
@@ -54,11 +54,7 @@ export class DatasetsIterrows- extends AbstractPage
- implements Datase
}
nextPageInfo(): PageInfo | null {
- const offset = this.next_index;
- if (!offset) {
- return null;
- }
-
+ const offset = this.next_index ?? 0;
const length = this.getPaginatedItems().length;
const currentCount = offset + length;
diff --git a/src/resources/alpha.ts b/src/resources/alpha.ts
new file mode 100644
index 0000000..446b643
--- /dev/null
+++ b/src/resources/alpha.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './alpha/index';
diff --git a/src/resources/agents.ts b/src/resources/alpha/agents.ts
similarity index 100%
rename from src/resources/agents.ts
rename to src/resources/alpha/agents.ts
diff --git a/src/resources/agents/agents.ts b/src/resources/alpha/agents/agents.ts
similarity index 93%
rename from src/resources/agents/agents.ts
rename to src/resources/alpha/agents/agents.ts
index e7be733..f5bedad 100644
--- a/src/resources/agents/agents.ts
+++ b/src/resources/alpha/agents/agents.ts
@@ -6,10 +6,10 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
-import * as Shared from '../shared';
+import { APIResource } from '../../../resource';
+import { isRequestOptions } from '../../../core';
+import * as Core from '../../../core';
+import * as Shared from '../../shared';
import * as SessionAPI from './session';
import {
Session,
@@ -31,7 +31,6 @@ import {
TurnCreateParamsStreaming,
TurnResource,
TurnResponseEvent,
- TurnResponseEventPayload,
TurnResumeParams,
TurnResumeParamsNonStreaming,
TurnResumeParamsStreaming,
@@ -46,14 +45,14 @@ export class Agents extends APIResource {
* Create an agent with the given configuration.
*/
create(body: AgentCreateParams, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.post('/v1/agents', { body, ...options });
+ return this._client.post('/v1alpha/agents', { body, ...options });
}
/**
* Describe an agent by its ID.
*/
retrieve(agentId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/agents/${agentId}`, options);
+ return this._client.get(`/v1alpha/agents/${agentId}`, options);
}
/**
@@ -68,14 +67,14 @@ export class Agents extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.get('/v1/agents', { query, ...options });
+ return this._client.get('/v1alpha/agents', { query, ...options });
}
/**
* Delete an agent by its ID and its associated sessions and turns.
*/
delete(agentId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/agents/${agentId}`, {
+ return this._client.delete(`/v1alpha/agents/${agentId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
@@ -144,7 +143,7 @@ export interface MemoryRetrievalStep {
/**
* The IDs of the vector databases to retrieve context from.
*/
- vector_db_ids: string;
+ vector_store_ids: string;
/**
* The time the step completed.
@@ -361,7 +360,6 @@ export declare namespace Agents {
type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk,
type Turn as Turn,
type TurnResponseEvent as TurnResponseEvent,
- type TurnResponseEventPayload as TurnResponseEventPayload,
type TurnCreateParams as TurnCreateParams,
type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming,
type TurnCreateParamsStreaming as TurnCreateParamsStreaming,
diff --git a/src/resources/agents/index.ts b/src/resources/alpha/agents/index.ts
similarity index 97%
rename from src/resources/agents/index.ts
rename to src/resources/alpha/agents/index.ts
index d98c2b8..bbaa188 100644
--- a/src/resources/agents/index.ts
+++ b/src/resources/alpha/agents/index.ts
@@ -34,7 +34,6 @@ export {
type AgentTurnResponseStreamChunk,
type Turn,
type TurnResponseEvent,
- type TurnResponseEventPayload,
type TurnCreateParams,
type TurnCreateParamsNonStreaming,
type TurnCreateParamsStreaming,
diff --git a/src/resources/agents/session.ts b/src/resources/alpha/agents/session.ts
similarity index 88%
rename from src/resources/agents/session.ts
rename to src/resources/alpha/agents/session.ts
index a0c8cb1..4ccb7f6 100644
--- a/src/resources/agents/session.ts
+++ b/src/resources/alpha/agents/session.ts
@@ -6,9 +6,9 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import { isRequestOptions } from '../../../core';
+import * as Core from '../../../core';
import * as TurnAPI from './turn';
export class SessionResource extends APIResource {
@@ -20,7 +20,7 @@ export class SessionResource extends APIResource {
body: SessionCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise {
- return this._client.post(`/v1/agents/${agentId}/session`, { body, ...options });
+ return this._client.post(`/v1alpha/agents/${agentId}/session`, { body, ...options });
}
/**
@@ -42,7 +42,7 @@ export class SessionResource extends APIResource {
if (isRequestOptions(query)) {
return this.retrieve(agentId, sessionId, {}, query);
}
- return this._client.get(`/v1/agents/${agentId}/session/${sessionId}`, { query, ...options });
+ return this._client.get(`/v1alpha/agents/${agentId}/session/${sessionId}`, { query, ...options });
}
/**
@@ -62,14 +62,14 @@ export class SessionResource extends APIResource {
if (isRequestOptions(query)) {
return this.list(agentId, {}, query);
}
- return this._client.get(`/v1/agents/${agentId}/sessions`, { query, ...options });
+ return this._client.get(`/v1alpha/agents/${agentId}/sessions`, { query, ...options });
}
/**
* Delete an agent session by its ID and its associated turns.
*/
delete(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/agents/${agentId}/session/${sessionId}`, {
+ return this._client.delete(`/v1alpha/agents/${agentId}/session/${sessionId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
diff --git a/src/resources/agents/steps.ts b/src/resources/alpha/agents/steps.ts
similarity index 85%
rename from src/resources/agents/steps.ts
rename to src/resources/alpha/agents/steps.ts
index f4eaece..decbc4e 100644
--- a/src/resources/agents/steps.ts
+++ b/src/resources/alpha/agents/steps.ts
@@ -6,8 +6,8 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
import * as AgentsAPI from './agents';
export class Steps extends APIResource {
@@ -22,7 +22,7 @@ export class Steps extends APIResource {
options?: Core.RequestOptions,
): Core.APIPromise {
return this._client.get(
- `/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`,
+ `/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`,
options,
);
}
diff --git a/src/resources/agents/turn.ts b/src/resources/alpha/agents/turn.ts
similarity index 87%
rename from src/resources/agents/turn.ts
rename to src/resources/alpha/agents/turn.ts
index 660b8ea..10dd9b6 100644
--- a/src/resources/agents/turn.ts
+++ b/src/resources/alpha/agents/turn.ts
@@ -6,13 +6,13 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import { APIPromise } from '../../core';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import { APIPromise } from '../../../core';
+import * as Core from '../../../core';
import * as TurnAPI from './turn';
-import * as Shared from '../shared';
+import * as Shared from '../../shared';
import * as AgentsAPI from './agents';
-import { Stream } from '../../streaming';
+import { Stream } from '../../../streaming';
export class TurnResource extends APIResource {
/**
@@ -42,7 +42,7 @@ export class TurnResource extends APIResource {
body: TurnCreateParams,
options?: Core.RequestOptions,
): APIPromise | APIPromise> {
- return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, {
+ return this._client.post(`/v1alpha/agents/${agentId}/session/${sessionId}/turn`, {
body,
...options,
stream: body.stream ?? false,
@@ -58,7 +58,7 @@ export class TurnResource extends APIResource {
turnId: string,
options?: Core.RequestOptions,
): Core.APIPromise {
- return this._client.get(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options);
+ return this._client.get(`/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options);
}
/**
@@ -95,7 +95,7 @@ export class TurnResource extends APIResource {
body: TurnResumeParams,
options?: Core.RequestOptions,
): APIPromise | APIPromise> {
- return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, {
+ return this._client.post(`/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, {
body,
...options,
stream: body.stream ?? false,
@@ -265,21 +265,16 @@ export interface TurnResponseEvent {
/**
* Event-specific payload containing event data
*/
- payload: TurnResponseEventPayload;
+ payload:
+ | TurnResponseEvent.AgentTurnResponseStepStartPayload
+ | TurnResponseEvent.AgentTurnResponseStepProgressPayload
+ | TurnResponseEvent.AgentTurnResponseStepCompletePayload
+ | TurnResponseEvent.AgentTurnResponseTurnStartPayload
+ | TurnResponseEvent.AgentTurnResponseTurnCompletePayload
+ | TurnResponseEvent.AgentTurnResponseTurnAwaitingInputPayload;
}
-/**
- * Payload for step start events in agent turn responses.
- */
-export type TurnResponseEventPayload =
- | TurnResponseEventPayload.AgentTurnResponseStepStartPayload
- | TurnResponseEventPayload.AgentTurnResponseStepProgressPayload
- | TurnResponseEventPayload.AgentTurnResponseStepCompletePayload
- | TurnResponseEventPayload.AgentTurnResponseTurnStartPayload
- | TurnResponseEventPayload.AgentTurnResponseTurnCompletePayload
- | TurnResponseEventPayload.AgentTurnResponseTurnAwaitingInputPayload;
-
-export namespace TurnResponseEventPayload {
+export namespace TurnResponseEvent {
/**
* Payload for step start events in agent turn responses.
*/
@@ -312,7 +307,10 @@ export namespace TurnResponseEventPayload {
/**
* Incremental content changes during step execution
*/
- delta: Shared.ContentDelta;
+ delta:
+ | AgentTurnResponseStepProgressPayload.TextDelta
+ | AgentTurnResponseStepProgressPayload.ImageDelta
+ | AgentTurnResponseStepProgressPayload.ToolCallDelta;
/**
* Type of event being reported
@@ -330,6 +328,58 @@ export namespace TurnResponseEventPayload {
step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
}
+ export namespace AgentTurnResponseStepProgressPayload {
+ /**
+ * A text content delta for streaming responses.
+ */
+ export interface TextDelta {
+ /**
+ * The incremental text content
+ */
+ text: string;
+
+ /**
+ * Discriminator type of the delta. Always "text"
+ */
+ type: 'text';
+ }
+
+ /**
+ * An image content delta for streaming responses.
+ */
+ export interface ImageDelta {
+ /**
+ * The incremental image data as bytes
+ */
+ image: string;
+
+ /**
+ * Discriminator type of the delta. Always "image"
+ */
+ type: 'image';
+ }
+
+ /**
+ * A tool call content delta for streaming responses.
+ */
+ export interface ToolCallDelta {
+ /**
+ * Current parsing status of the tool call
+ */
+ parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded';
+
+ /**
+ * Either an in-progress tool call string or the final parsed tool call
+ */
+ tool_call: string | Shared.ToolCall;
+
+ /**
+ * Discriminator type of the delta. Always "tool_call"
+ */
+ type: 'tool_call';
+ }
+ }
+
/**
* Payload for step completion events in agent turn responses.
*/
@@ -627,7 +677,6 @@ export declare namespace TurnResource {
type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk,
type Turn as Turn,
type TurnResponseEvent as TurnResponseEvent,
- type TurnResponseEventPayload as TurnResponseEventPayload,
type TurnCreateParams as TurnCreateParams,
type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming,
type TurnCreateParamsStreaming as TurnCreateParamsStreaming,
diff --git a/src/resources/alpha/alpha.ts b/src/resources/alpha/alpha.ts
new file mode 100644
index 0000000..ad44259
--- /dev/null
+++ b/src/resources/alpha/alpha.ts
@@ -0,0 +1,111 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as BenchmarksAPI from './benchmarks';
+import {
+ Benchmark,
+ BenchmarkListResponse,
+ BenchmarkRegisterParams,
+ Benchmarks,
+ ListBenchmarksResponse,
+} from './benchmarks';
+import * as InferenceAPI from './inference';
+import { Inference, InferenceRerankParams, InferenceRerankResponse } from './inference';
+import * as AgentsAPI from './agents/agents';
+import {
+ AgentCreateParams,
+ AgentCreateResponse,
+ AgentListParams,
+ AgentListResponse,
+ AgentRetrieveResponse,
+ Agents,
+ InferenceStep,
+ MemoryRetrievalStep,
+ ShieldCallStep,
+ ToolExecutionStep,
+ ToolResponse,
+} from './agents/agents';
+import * as EvalAPI from './eval/eval';
+import {
+ BenchmarkConfig,
+ Eval,
+ EvalEvaluateRowsAlphaParams,
+ EvalEvaluateRowsParams,
+ EvalRunEvalAlphaParams,
+ EvalRunEvalParams,
+ EvaluateResponse,
+ Job,
+} from './eval/eval';
+import * as PostTrainingAPI from './post-training/post-training';
+import {
+ AlgorithmConfig,
+ ListPostTrainingJobsResponse,
+ PostTraining,
+ PostTrainingJob,
+ PostTrainingPreferenceOptimizeParams,
+ PostTrainingSupervisedFineTuneParams,
+} from './post-training/post-training';
+
+export class Alpha extends APIResource {
+ inference: InferenceAPI.Inference = new InferenceAPI.Inference(this._client);
+ postTraining: PostTrainingAPI.PostTraining = new PostTrainingAPI.PostTraining(this._client);
+ benchmarks: BenchmarksAPI.Benchmarks = new BenchmarksAPI.Benchmarks(this._client);
+ eval: EvalAPI.Eval = new EvalAPI.Eval(this._client);
+ agents: AgentsAPI.Agents = new AgentsAPI.Agents(this._client);
+}
+
+Alpha.Inference = Inference;
+Alpha.PostTraining = PostTraining;
+Alpha.Benchmarks = Benchmarks;
+Alpha.Eval = Eval;
+Alpha.Agents = Agents;
+
+export declare namespace Alpha {
+ export {
+ Inference as Inference,
+ type InferenceRerankResponse as InferenceRerankResponse,
+ type InferenceRerankParams as InferenceRerankParams,
+ };
+
+ export {
+ PostTraining as PostTraining,
+ type AlgorithmConfig as AlgorithmConfig,
+ type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse,
+ type PostTrainingJob as PostTrainingJob,
+ type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
+ type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
+ };
+
+ export {
+ Benchmarks as Benchmarks,
+ type Benchmark as Benchmark,
+ type ListBenchmarksResponse as ListBenchmarksResponse,
+ type BenchmarkListResponse as BenchmarkListResponse,
+ type BenchmarkRegisterParams as BenchmarkRegisterParams,
+ };
+
+ export {
+ Eval as Eval,
+ type BenchmarkConfig as BenchmarkConfig,
+ type EvaluateResponse as EvaluateResponse,
+ type Job as Job,
+ type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
+ type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
+ type EvalRunEvalParams as EvalRunEvalParams,
+ type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
+ };
+
+ export {
+ Agents as Agents,
+ type InferenceStep as InferenceStep,
+ type MemoryRetrievalStep as MemoryRetrievalStep,
+ type ShieldCallStep as ShieldCallStep,
+ type ToolExecutionStep as ToolExecutionStep,
+ type ToolResponse as ToolResponse,
+ type AgentCreateResponse as AgentCreateResponse,
+ type AgentRetrieveResponse as AgentRetrieveResponse,
+ type AgentListResponse as AgentListResponse,
+ type AgentCreateParams as AgentCreateParams,
+ type AgentListParams as AgentListParams,
+ };
+}
diff --git a/src/resources/benchmarks.ts b/src/resources/alpha/benchmarks.ts
similarity index 88%
rename from src/resources/benchmarks.ts
rename to src/resources/alpha/benchmarks.ts
index f725c19..f2b4aba 100644
--- a/src/resources/benchmarks.ts
+++ b/src/resources/alpha/benchmarks.ts
@@ -6,15 +6,15 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../resource';
-import * as Core from '../core';
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
export class Benchmarks extends APIResource {
/**
* Get a benchmark by its ID.
*/
retrieve(benchmarkId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}`, options);
+ return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}`, options);
}
/**
@@ -22,7 +22,9 @@ export class Benchmarks extends APIResource {
*/
list(options?: Core.RequestOptions): Core.APIPromise<BenchmarkListResponse> {
return (
- this._client.get('/v1/eval/benchmarks', options) as Core.APIPromise<{ data: BenchmarkListResponse }>
+ this._client.get('/v1alpha/eval/benchmarks', options) as Core.APIPromise<{
+ data: BenchmarkListResponse;
+ }>
)._thenUnwrap((obj) => obj.data);
}
@@ -30,7 +32,7 @@ export class Benchmarks extends APIResource {
* Register a benchmark.
*/
register(body: BenchmarkRegisterParams, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.post('/v1/eval/benchmarks', {
+ return this._client.post('/v1alpha/eval/benchmarks', {
body,
...options,
headers: { Accept: '*/*', ...options?.headers },
diff --git a/src/resources/eval.ts b/src/resources/alpha/eval.ts
similarity index 100%
rename from src/resources/eval.ts
rename to src/resources/alpha/eval.ts
diff --git a/src/resources/eval/eval.ts b/src/resources/alpha/eval/eval.ts
similarity index 84%
rename from src/resources/eval/eval.ts
rename to src/resources/alpha/eval/eval.ts
index eebbdb6..379a2c4 100644
--- a/src/resources/eval/eval.ts
+++ b/src/resources/alpha/eval/eval.ts
@@ -6,10 +6,10 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as ScoringFunctionsAPI from '../scoring-functions';
-import * as Shared from '../shared';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
+import * as ScoringFunctionsAPI from '../../scoring-functions';
+import * as Shared from '../../shared';
import * as JobsAPI from './jobs';
import { Jobs } from './jobs';
@@ -24,7 +24,7 @@ export class Eval extends APIResource {
body: EvalEvaluateRowsParams,
options?: Core.RequestOptions,
): Core.APIPromise<EvaluateResponse> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
}
/**
@@ -35,14 +35,14 @@ export class Eval extends APIResource {
body: EvalEvaluateRowsAlphaParams,
options?: Core.RequestOptions,
): Core.APIPromise<EvaluateResponse> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
}
/**
* Run an evaluation on a benchmark.
*/
runEval(benchmarkId: string, body: EvalRunEvalParams, options?: Core.RequestOptions): Core.APIPromise<Job> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
}
/**
@@ -53,7 +53,7 @@ export class Eval extends APIResource {
body: EvalRunEvalAlphaParams,
options?: Core.RequestOptions,
): Core.APIPromise<Job> {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
+ return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
}
}
@@ -64,7 +64,7 @@ export interface BenchmarkConfig {
/**
* The candidate to evaluate.
*/
- eval_candidate: EvalCandidate;
+ eval_candidate: BenchmarkConfig.ModelCandidate | BenchmarkConfig.AgentCandidate;
/**
* Map between scoring function id and parameters for each scoring function you
@@ -79,12 +79,7 @@ export interface BenchmarkConfig {
num_examples?: number;
}
-/**
- * A model candidate for evaluation.
- */
-export type EvalCandidate = EvalCandidate.ModelCandidate | EvalCandidate.AgentCandidate;
-
-export namespace EvalCandidate {
+export namespace BenchmarkConfig {
/**
* A model candidate for evaluation.
*/
@@ -203,7 +198,6 @@ Eval.Jobs = Jobs;
export declare namespace Eval {
export {
type BenchmarkConfig as BenchmarkConfig,
- type EvalCandidate as EvalCandidate,
type EvaluateResponse as EvaluateResponse,
type Job as Job,
type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
diff --git a/src/resources/eval/index.ts b/src/resources/alpha/eval/index.ts
similarity index 96%
rename from src/resources/eval/index.ts
rename to src/resources/alpha/eval/index.ts
index 751103f..27b4b37 100644
--- a/src/resources/eval/index.ts
+++ b/src/resources/alpha/eval/index.ts
@@ -9,7 +9,6 @@
export {
Eval,
type BenchmarkConfig,
- type EvalCandidate,
type EvaluateResponse,
type Job,
type EvalEvaluateRowsParams,
diff --git a/src/resources/eval/jobs.ts b/src/resources/alpha/eval/jobs.ts
similarity index 71%
rename from src/resources/eval/jobs.ts
rename to src/resources/alpha/eval/jobs.ts
index 9151976..b26692a 100644
--- a/src/resources/eval/jobs.ts
+++ b/src/resources/alpha/eval/jobs.ts
@@ -6,8 +6,8 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
import * as EvalAPI from './eval';
export class Jobs extends APIResource {
@@ -19,14 +19,14 @@ export class Jobs extends APIResource {
jobId: string,
options?: Core.RequestOptions,
): Core.APIPromise<EvalAPI.EvaluateResponse> {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options);
+ return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options);
}
/**
* Cancel a job.
*/
cancel(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, {
+ return this._client.delete(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
@@ -36,6 +36,6 @@ export class Jobs extends APIResource {
* Get the status of a job.
*/
status(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<EvalAPI.Job> {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options);
+ return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options);
}
}
diff --git a/src/resources/alpha/index.ts b/src/resources/alpha/index.ts
new file mode 100644
index 0000000..39388cf
--- /dev/null
+++ b/src/resources/alpha/index.ts
@@ -0,0 +1,42 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ Agents,
+ type InferenceStep,
+ type MemoryRetrievalStep,
+ type ShieldCallStep,
+ type ToolExecutionStep,
+ type ToolResponse,
+ type AgentCreateResponse,
+ type AgentRetrieveResponse,
+ type AgentListResponse,
+ type AgentCreateParams,
+ type AgentListParams,
+} from './agents/index';
+export { Alpha } from './alpha';
+export {
+ Benchmarks,
+ type Benchmark,
+ type ListBenchmarksResponse,
+ type BenchmarkListResponse,
+ type BenchmarkRegisterParams,
+} from './benchmarks';
+export {
+ Eval,
+ type BenchmarkConfig,
+ type EvaluateResponse,
+ type Job,
+ type EvalEvaluateRowsParams,
+ type EvalEvaluateRowsAlphaParams,
+ type EvalRunEvalParams,
+ type EvalRunEvalAlphaParams,
+} from './eval/index';
+export { Inference, type InferenceRerankResponse, type InferenceRerankParams } from './inference';
+export {
+ PostTraining,
+ type AlgorithmConfig,
+ type ListPostTrainingJobsResponse,
+ type PostTrainingJob,
+ type PostTrainingPreferenceOptimizeParams,
+ type PostTrainingSupervisedFineTuneParams,
+} from './post-training/index';
diff --git a/src/resources/alpha/inference.ts b/src/resources/alpha/inference.ts
new file mode 100644
index 0000000..ca6db21
--- /dev/null
+++ b/src/resources/alpha/inference.ts
@@ -0,0 +1,178 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
+
+export class Inference extends APIResource {
+ /**
+ * Rerank a list of documents based on their relevance to a query.
+ */
+ rerank(
+ body: InferenceRerankParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<InferenceRerankResponse> {
+ return (
+ this._client.post('/v1alpha/inference/rerank', { body, ...options }) as Core.APIPromise<{
+ data: InferenceRerankResponse;
+ }>
+ )._thenUnwrap((obj) => obj.data);
+ }
+}
+
+/**
+ * List of rerank result objects, sorted by relevance score (descending)
+ */
+export type InferenceRerankResponse = Array<InferenceRerankResponse.InferenceRerankResponseItem>;
+
+export namespace InferenceRerankResponse {
+ /**
+ * A single rerank result from a reranking response.
+ */
+ export interface InferenceRerankResponseItem {
+ /**
+ * The original index of the document in the input list
+ */
+ index: number;
+
+ /**
+ * The relevance score from the model output. Values are inverted when applicable
+ * so that higher scores indicate greater relevance.
+ */
+ relevance_score: number;
+ }
+}
+
+export interface InferenceRerankParams {
+ /**
+ * List of items to rerank. Each item can be a string, text content part, or image
+ * content part. Each input must not exceed the model's max input token length.
+ */
+ items: Array<
+ | string
+ | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
+ | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam
+ >;
+
+ /**
+ * The identifier of the reranking model to use.
+ */
+ model: string;
+
+ /**
+ * The search query to rank items against. Can be a string, text content part, or
+ * image content part. The input must not exceed the model's max input token
+ * length.
+ */
+ query:
+ | string
+ | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
+ | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam;
+
+ /**
+ * (Optional) Maximum number of results to return. Default: returns all.
+ */
+ max_num_results?: number;
+}
+
+export namespace InferenceRerankParams {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+ /**
+ * Must be "image_url" to identify this as image content
+ */
+ type: 'image_url';
+ }
+
+ export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
+ url: string;
+
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
+ detail?: string;
+ }
+ }
+
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+ /**
+ * Must be "image_url" to identify this as image content
+ */
+ type: 'image_url';
+ }
+
+ export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
+ url: string;
+
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
+ detail?: string;
+ }
+ }
+}
+
+export declare namespace Inference {
+ export {
+ type InferenceRerankResponse as InferenceRerankResponse,
+ type InferenceRerankParams as InferenceRerankParams,
+ };
+}
diff --git a/src/resources/post-training.ts b/src/resources/alpha/post-training.ts
similarity index 100%
rename from src/resources/post-training.ts
rename to src/resources/alpha/post-training.ts
diff --git a/src/resources/post-training/index.ts b/src/resources/alpha/post-training/index.ts
similarity index 100%
rename from src/resources/post-training/index.ts
rename to src/resources/alpha/post-training/index.ts
diff --git a/src/resources/post-training/job.ts b/src/resources/alpha/post-training/job.ts
similarity index 89%
rename from src/resources/post-training/job.ts
rename to src/resources/alpha/post-training/job.ts
index 8790ea3..83ea474 100644
--- a/src/resources/post-training/job.ts
+++ b/src/resources/alpha/post-training/job.ts
@@ -6,21 +6,16 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as PostTrainingAPI from './post-training';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
export class Job extends APIResource {
/**
* Get all training jobs.
*/
- list(
- options?: Core.RequestOptions,
- ): Core.APIPromise<Array<PostTrainingAPI.ListPostTrainingJobsResponse.Data>> {
+ list(options?: Core.RequestOptions): Core.APIPromise<JobListResponse> {
return (
- this._client.get('/v1/post-training/jobs', options) as Core.APIPromise<{
- data: Array<PostTrainingAPI.ListPostTrainingJobsResponse.Data>;
- }>
+ this._client.get('/v1alpha/post-training/jobs', options) as Core.APIPromise<{ data: JobListResponse }>
)._thenUnwrap((obj) => obj.data);
}
@@ -28,14 +23,14 @@ export class Job extends APIResource {
* Get the artifacts of a training job.
*/
artifacts(query: JobArtifactsParams, options?: Core.RequestOptions): Core.APIPromise<JobArtifactsResponse> {
- return this._client.get('/v1/post-training/job/artifacts', { query, ...options });
+ return this._client.get('/v1alpha/post-training/job/artifacts', { query, ...options });
}
/**
* Cancel a training job.
*/
cancel(body: JobCancelParams, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.post('/v1/post-training/job/cancel', {
+ return this._client.post('/v1alpha/post-training/job/cancel', {
body,
...options,
headers: { Accept: '*/*', ...options?.headers },
@@ -46,7 +41,7 @@ export class Job extends APIResource {
* Get the status of a training job.
*/
status(query: JobStatusParams, options?: Core.RequestOptions): Core.APIPromise<JobStatusResponse> {
- return this._client.get('/v1/post-training/job/status', { query, ...options });
+ return this._client.get('/v1alpha/post-training/job/status', { query, ...options });
}
}
diff --git a/src/resources/post-training/post-training.ts b/src/resources/alpha/post-training/post-training.ts
similarity index 96%
rename from src/resources/post-training/post-training.ts
rename to src/resources/alpha/post-training/post-training.ts
index ed606fc..33d05f9 100644
--- a/src/resources/post-training/post-training.ts
+++ b/src/resources/alpha/post-training/post-training.ts
@@ -6,8 +6,8 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
+import { APIResource } from '../../../resource';
+import * as Core from '../../../core';
import * as JobAPI from './job';
import {
Job,
@@ -29,7 +29,7 @@ export class PostTraining extends APIResource {
body: PostTrainingPreferenceOptimizeParams,
options?: Core.RequestOptions,
): Core.APIPromise<PostTrainingJob> {
- return this._client.post('/v1/post-training/preference-optimize', { body, ...options });
+ return this._client.post('/v1alpha/post-training/preference-optimize', { body, ...options });
}
/**
@@ -39,7 +39,7 @@ export class PostTraining extends APIResource {
body: PostTrainingSupervisedFineTuneParams,
options?: Core.RequestOptions,
): Core.APIPromise<PostTrainingJob> {
- return this._client.post('/v1/post-training/supervised-fine-tune', { body, ...options });
+ return this._client.post('/v1alpha/post-training/supervised-fine-tune', { body, ...options });
}
}
@@ -116,13 +116,7 @@ export namespace AlgorithmConfig {
}
export interface ListPostTrainingJobsResponse {
- data: Array<ListPostTrainingJobsResponse.Data>;
-}
-
-export namespace ListPostTrainingJobsResponse {
- export interface Data {
- job_uuid: string;
- }
+ data: JobAPI.JobListResponse;
}
export interface PostTrainingJob {
diff --git a/src/resources/beta.ts b/src/resources/beta.ts
new file mode 100644
index 0000000..1542e94
--- /dev/null
+++ b/src/resources/beta.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './beta/index';
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
new file mode 100644
index 0000000..28c6489
--- /dev/null
+++ b/src/resources/beta/beta.ts
@@ -0,0 +1,35 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as DatasetsAPI from './datasets';
+import {
+ DatasetAppendrowsParams,
+ DatasetIterrowsParams,
+ DatasetIterrowsResponse,
+ DatasetListResponse,
+ DatasetRegisterParams,
+ DatasetRegisterResponse,
+ DatasetRetrieveResponse,
+ Datasets,
+ ListDatasetsResponse,
+} from './datasets';
+
+export class Beta extends APIResource {
+ datasets: DatasetsAPI.Datasets = new DatasetsAPI.Datasets(this._client);
+}
+
+Beta.Datasets = Datasets;
+
+export declare namespace Beta {
+ export {
+ Datasets as Datasets,
+ type ListDatasetsResponse as ListDatasetsResponse,
+ type DatasetRetrieveResponse as DatasetRetrieveResponse,
+ type DatasetListResponse as DatasetListResponse,
+ type DatasetIterrowsResponse as DatasetIterrowsResponse,
+ type DatasetRegisterResponse as DatasetRegisterResponse,
+ type DatasetAppendrowsParams as DatasetAppendrowsParams,
+ type DatasetIterrowsParams as DatasetIterrowsParams,
+ type DatasetRegisterParams as DatasetRegisterParams,
+ };
+}
diff --git a/src/resources/datasets.ts b/src/resources/beta/datasets.ts
similarity index 94%
rename from src/resources/datasets.ts
rename to src/resources/beta/datasets.ts
index 29a331b..30cfae2 100644
--- a/src/resources/datasets.ts
+++ b/src/resources/beta/datasets.ts
@@ -6,16 +6,16 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../resource';
-import { isRequestOptions } from '../core';
-import * as Core from '../core';
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
export class Datasets extends APIResource {
/**
* Get a dataset by its ID.
*/
retrieve(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<DatasetRetrieveResponse> {
- return this._client.get(`/v1/datasets/${datasetId}`, options);
+ return this._client.get(`/v1beta/datasets/${datasetId}`, options);
}
/**
@@ -23,7 +23,7 @@ export class Datasets extends APIResource {
*/
list(options?: Core.RequestOptions): Core.APIPromise<DatasetListResponse> {
return (
- this._client.get('/v1/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }>
+ this._client.get('/v1beta/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }>
)._thenUnwrap((obj) => obj.data);
}
@@ -35,7 +35,7 @@ export class Datasets extends APIResource {
body: DatasetAppendrowsParams,
options?: Core.RequestOptions,
): Core.APIPromise<void> {
- return this._client.post(`/v1/datasetio/append-rows/${datasetId}`, {
+ return this._client.post(`/v1beta/datasetio/append-rows/${datasetId}`, {
body,
...options,
headers: { Accept: '*/*', ...options?.headers },
@@ -67,7 +67,7 @@ export class Datasets extends APIResource {
if (isRequestOptions(query)) {
return this.iterrows(datasetId, {}, query);
}
- return this._client.get(`/v1/datasetio/iterrows/${datasetId}`, { query, ...options });
+ return this._client.get(`/v1beta/datasetio/iterrows/${datasetId}`, { query, ...options });
}
/**
@@ -77,14 +77,14 @@ export class Datasets extends APIResource {
body: DatasetRegisterParams,
options?: Core.RequestOptions,
): Core.APIPromise<DatasetRegisterResponse> {
- return this._client.post('/v1/datasets', { body, ...options });
+ return this._client.post('/v1beta/datasets', { body, ...options });
}
/**
* Unregister a dataset by its ID.
*/
unregister(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/datasets/${datasetId}`, {
+ return this._client.delete(`/v1beta/datasets/${datasetId}`, {
...options,
headers: { Accept: '*/*', ...options?.headers },
});
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
new file mode 100644
index 0000000..de238a5
--- /dev/null
+++ b/src/resources/beta/index.ts
@@ -0,0 +1,14 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export { Beta } from './beta';
+export {
+ Datasets,
+ type ListDatasetsResponse,
+ type DatasetRetrieveResponse,
+ type DatasetListResponse,
+ type DatasetIterrowsResponse,
+ type DatasetRegisterResponse,
+ type DatasetAppendrowsParams,
+ type DatasetIterrowsParams,
+ type DatasetRegisterParams,
+} from './datasets';
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 31c21f4..c4447cb 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -52,6 +52,11 @@ export interface ChatCompletionChunk {
* The object type, which will be "chat.completion.chunk"
*/
object: 'chat.completion.chunk';
+
+ /**
+ * Token usage information (typically included in final chunk with stream_options)
+ */
+ usage?: ChatCompletionChunk.Usage;
}
export namespace ChatCompletionChunk {
@@ -90,6 +95,11 @@ export namespace ChatCompletionChunk {
*/
content?: string;
+ /**
+ * (Optional) The reasoning content from the model (non-standard, for o1/o3 models)
+ */
+ reasoning_content?: string;
+
/**
* (Optional) The refusal of the delta
*/
@@ -223,6 +233,58 @@ export namespace ChatCompletionChunk {
}
}
}
+
+ /**
+ * Token usage information (typically included in final chunk with stream_options)
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
Chat.Completions = Completions;
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index be49654..05575c4 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -17,8 +17,8 @@ import { Stream } from '../../streaming';
export class Completions extends APIResource {
/**
- * Generate an OpenAI-compatible chat completion for the given messages using the
- * specified model.
+ * Create chat completions. Generate an OpenAI-compatible chat completion for the
+ * given messages using the specified model.
*/
create(
body: CompletionCreateParamsNonStreaming,
@@ -36,22 +36,20 @@ export class Completions extends APIResource {
body: CompletionCreateParams,
options?: Core.RequestOptions,
): APIPromise<CompletionCreateResponse> | APIPromise<Stream<ChatCompletionChunk>> {
- return this._client.post('/v1/openai/v1/chat/completions', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<CompletionCreateResponse> | APIPromise<Stream<ChatCompletionChunk>>;
+ return this._client.post('/v1/chat/completions', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<CompletionCreateResponse>
+ | APIPromise<Stream<ChatCompletionChunk>>;
}
/**
- * Describe a chat completion by its ID.
+ * Get chat completion. Describe a chat completion by its ID.
*/
retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise<CompletionRetrieveResponse> {
- return this._client.get(`/v1/openai/v1/chat/completions/${completionId}`, options);
+ return this._client.get(`/v1/chat/completions/${completionId}`, options);
}
/**
- * List all chat completions.
+ * List chat completions.
*/
list(
query?: CompletionListParams,
@@ -67,11 +65,10 @@ export class Completions extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList(
- '/v1/openai/v1/chat/completions',
- CompletionListResponsesOpenAICursorPage,
- { query, ...options },
- );
+ return this._client.getAPIList('/v1/chat/completions', CompletionListResponsesOpenAICursorPage, {
+ query,
+ ...options,
+ });
}
}
@@ -113,6 +110,11 @@ export namespace CompletionCreateResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: OpenAIChatCompletion.Usage;
}
export namespace OpenAIChatCompletion {
@@ -510,6 +512,58 @@ export namespace CompletionCreateResponse {
}
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
}
@@ -546,6 +600,11 @@ export interface CompletionRetrieveResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: CompletionRetrieveResponse.Usage;
}
export namespace CompletionRetrieveResponse {
@@ -1233,6 +1292,58 @@ export namespace CompletionRetrieveResponse {
type: 'text';
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
export interface CompletionListResponse {
@@ -1268,6 +1379,11 @@ export interface CompletionListResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: CompletionListResponse.Usage;
}
export namespace CompletionListResponse {
@@ -1955,6 +2071,58 @@ export namespace CompletionListResponse {
type: 'text';
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 1d6a17e..4d9ecf5 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -14,8 +14,8 @@ import { Stream } from '../streaming';
export class Completions extends APIResource {
/**
- * Generate an OpenAI-compatible completion for the given prompt using the
- * specified model.
+ * Create completion. Generate an OpenAI-compatible completion for the given prompt
+ * using the specified model.
*/
create(
body: CompletionCreateParamsNonStreaming,
@@ -33,11 +33,9 @@ export class Completions extends APIResource {
body: CompletionCreateParams,
options?: Core.RequestOptions,
): APIPromise<CompletionCreateResponse> | APIPromise<Stream<CompletionCreateResponse>> {
- return this._client.post('/v1/openai/v1/completions', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<CompletionCreateResponse> | APIPromise<Stream<CompletionCreateResponse>>;
+ return this._client.post('/v1/completions', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<CompletionCreateResponse>
+ | APIPromise<Stream<CompletionCreateResponse>>;
}
}
@@ -180,8 +178,6 @@ export interface CompletionCreateParamsBase {
*/
frequency_penalty?: number;
- guided_choice?: Array<string>;
-
/**
* (Optional) The logit bias to use.
*/
@@ -207,8 +203,6 @@ export interface CompletionCreateParamsBase {
*/
presence_penalty?: number;
- prompt_logprobs?: number;
-
/**
* (Optional) The seed to use.
*/
diff --git a/src/resources/conversations.ts b/src/resources/conversations.ts
new file mode 100644
index 0000000..6b50950
--- /dev/null
+++ b/src/resources/conversations.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './conversations/index';
diff --git a/src/resources/conversations/conversations.ts b/src/resources/conversations/conversations.ts
new file mode 100644
index 0000000..c4f97d6
--- /dev/null
+++ b/src/resources/conversations/conversations.ts
@@ -0,0 +1,549 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
+import * as ItemsAPI from './items';
+import {
+ ItemCreateParams,
+ ItemCreateResponse,
+ ItemGetResponse,
+ ItemListParams,
+ ItemListResponse,
+ ItemListResponsesOpenAICursorPage,
+ Items,
+} from './items';
+
+export class Conversations extends APIResource {
+ items: ItemsAPI.Items = new ItemsAPI.Items(this._client);
+
+ /**
+ * Create a conversation.
+ */
+ create(body: ConversationCreateParams, options?: Core.RequestOptions): Core.APIPromise<ConversationObject> {
+ return this._client.post('/v1/conversations', { body, ...options });
+ }
+
+ /**
+ * Retrieve a conversation. Get a conversation with the given ID.
+ */
+ retrieve(conversationId: string, options?: Core.RequestOptions): Core.APIPromise<ConversationObject> {
+ return this._client.get(`/v1/conversations/${conversationId}`, options);
+ }
+
+ /**
+ * Update a conversation. Update a conversation's metadata with the given ID.
+ */
+ update(
+ conversationId: string,
+ body: ConversationUpdateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ConversationObject> {
+ return this._client.post(`/v1/conversations/${conversationId}`, { body, ...options });
+ }
+
+ /**
+ * Delete a conversation. Delete a conversation with the given ID.
+ */
+ delete(conversationId: string, options?: Core.RequestOptions): Core.APIPromise<ConversationDeleteResponse> {
+ return this._client.delete(`/v1/conversations/${conversationId}`, options);
+ }
+}
+
+/**
+ * OpenAI-compatible conversation object.
+ */
+export interface ConversationObject {
+ id: string;
+
+ created_at: number;
+
+ object: 'conversation';
+
+ items?: Array<unknown>;
+
+ metadata?: { [key: string]: string };
+}
+
+/**
+ * Response for deleted conversation.
+ */
+export interface ConversationDeleteResponse {
+ id: string;
+
+ deleted: boolean;
+
+ object: string;
+}
+
+export interface ConversationCreateParams {
+ /**
+ * Initial items to include in the conversation context.
+ */
+ items?: Array<
+ | ConversationCreateParams.OpenAIResponseMessage
+ | ConversationCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageFunctionToolCall
+ | ConversationCreateParams.OpenAIResponseInputFunctionToolCallOutput
+ | ConversationCreateParams.OpenAIResponseMcpApprovalRequest
+ | ConversationCreateParams.OpenAIResponseMcpApprovalResponse
+ | ConversationCreateParams.OpenAIResponseOutputMessageMcpCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageMcpListTools
+ >;
+
+ /**
+ * Set of key-value pairs that can be attached to an object.
+ */
+ metadata?: { [key: string]: string };
+}
+
+export namespace ConversationCreateParams {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ConversationUpdateParams {
+ /**
+ * Set of key-value pairs that can be attached to an object.
+ */
+ metadata: { [key: string]: string };
+}
+
+Conversations.Items = Items;
+Conversations.ItemListResponsesOpenAICursorPage = ItemListResponsesOpenAICursorPage;
+
+export declare namespace Conversations {
+ export {
+ type ConversationObject as ConversationObject,
+ type ConversationDeleteResponse as ConversationDeleteResponse,
+ type ConversationCreateParams as ConversationCreateParams,
+ type ConversationUpdateParams as ConversationUpdateParams,
+ };
+
+ export {
+ Items as Items,
+ type ItemCreateResponse as ItemCreateResponse,
+ type ItemListResponse as ItemListResponse,
+ type ItemGetResponse as ItemGetResponse,
+ ItemListResponsesOpenAICursorPage as ItemListResponsesOpenAICursorPage,
+ type ItemCreateParams as ItemCreateParams,
+ type ItemListParams as ItemListParams,
+ };
+}
diff --git a/src/resources/conversations/index.ts b/src/resources/conversations/index.ts
new file mode 100644
index 0000000..de33b78
--- /dev/null
+++ b/src/resources/conversations/index.ts
@@ -0,0 +1,18 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ Conversations,
+ type ConversationObject,
+ type ConversationDeleteResponse,
+ type ConversationCreateParams,
+ type ConversationUpdateParams,
+} from './conversations';
+export {
+ ItemListResponsesOpenAICursorPage,
+ Items,
+ type ItemCreateResponse,
+ type ItemListResponse,
+ type ItemGetResponse,
+ type ItemCreateParams,
+ type ItemListParams,
+} from './items';
diff --git a/src/resources/conversations/items.ts b/src/resources/conversations/items.ts
new file mode 100644
index 0000000..73fc238
--- /dev/null
+++ b/src/resources/conversations/items.ts
@@ -0,0 +1,1854 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import { OpenAICursorPage, type OpenAICursorPageParams } from '../../pagination';
+
+export class Items extends APIResource {
+ /**
+ * Create items. Create items in the conversation.
+ */
+ create(
+ conversationId: string,
+ body: ItemCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ItemCreateResponse> {
+ return this._client.post(`/v1/conversations/${conversationId}/items`, { body, ...options });
+ }
+
+ /**
+ * List items. List items in the conversation.
+ */
+ list(
+ conversationId: string,
+ query?: ItemListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ItemListResponsesOpenAICursorPage, ItemListResponse>;
+ list(
+ conversationId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ItemListResponsesOpenAICursorPage, ItemListResponse>;
+ list(
+ conversationId: string,
+ query: ItemListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ItemListResponsesOpenAICursorPage, ItemListResponse> {
+ if (isRequestOptions(query)) {
+ return this.list(conversationId, {}, query);
+ }
+ return this._client.getAPIList(
+ `/v1/conversations/${conversationId}/items`,
+ ItemListResponsesOpenAICursorPage,
+ { query, ...options },
+ );
+ }
+
+ /**
+ * Retrieve an item. Retrieve a conversation item.
+ */
+ get(
+ conversationId: string,
+ itemId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ItemGetResponse> {
+ return this._client.get(`/v1/conversations/${conversationId}/items/${itemId}`, options);
+ }
+}
+
+export class ItemListResponsesOpenAICursorPage extends OpenAICursorPage<ItemListResponse> {}
+
+/**
+ * List of conversation items with pagination.
+ */
+export interface ItemCreateResponse {
+ data: Array<
+ | ItemCreateResponse.OpenAIResponseMessage
+ | ItemCreateResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemCreateResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ItemCreateResponse.OpenAIResponseMcpApprovalRequest
+ | ItemCreateResponse.OpenAIResponseMcpApprovalResponse
+ | ItemCreateResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageMcpListTools
+ >;
+
+ has_more: boolean;
+
+ object: string;
+
+ first_id?: string;
+
+ last_id?: string;
+}
+
+export namespace ItemCreateResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+/**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+export type ItemListResponse =
+ | ItemListResponse.OpenAIResponseMessage
+ | ItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ItemListResponse.OpenAIResponseMcpApprovalRequest
+ | ItemListResponse.OpenAIResponseMcpApprovalResponse
+ | ItemListResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemListResponse.OpenAIResponseOutputMessageMcpListTools;
+
+export namespace ItemListResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+/**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+export type ItemGetResponse =
+ | ItemGetResponse.OpenAIResponseMessage
+ | ItemGetResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemGetResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemGetResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemGetResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ItemGetResponse.OpenAIResponseMcpApprovalRequest
+ | ItemGetResponse.OpenAIResponseMcpApprovalResponse
+ | ItemGetResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemGetResponse.OpenAIResponseOutputMessageMcpListTools;
+
+export namespace ItemGetResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ItemCreateParams {
+ /**
+ * Items to include in the conversation context.
+ */
+ items: Array<
+ | ItemCreateParams.OpenAIResponseMessage
+ | ItemCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemCreateParams.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemCreateParams.OpenAIResponseInputFunctionToolCallOutput
+ | ItemCreateParams.OpenAIResponseMcpApprovalRequest
+ | ItemCreateParams.OpenAIResponseMcpApprovalResponse
+ | ItemCreateParams.OpenAIResponseOutputMessageMcpCall
+ | ItemCreateParams.OpenAIResponseOutputMessageMcpListTools
+ >;
+}
+
+export namespace ItemCreateParams {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ItemListParams extends OpenAICursorPageParams {
+ /**
+ * Specify additional output data to include in the response.
+ */
+ include?: Array<
+ | 'web_search_call.action.sources'
+ | 'code_interpreter_call.outputs'
+ | 'computer_call_output.output.image_url'
+ | 'file_search_call.results'
+ | 'message.input_image.image_url'
+ | 'message.output_text.logprobs'
+ | 'reasoning.encrypted_content'
+ >;
+
+ /**
+ * The order to return items in (asc or desc, default desc).
+ */
+ order?: 'asc' | 'desc';
+}
+
+Items.ItemListResponsesOpenAICursorPage = ItemListResponsesOpenAICursorPage;
+
+export declare namespace Items {
+ export {
+ type ItemCreateResponse as ItemCreateResponse,
+ type ItemListResponse as ItemListResponse,
+ type ItemGetResponse as ItemGetResponse,
+ ItemListResponsesOpenAICursorPage as ItemListResponsesOpenAICursorPage,
+ type ItemCreateParams as ItemCreateParams,
+ type ItemListParams as ItemListParams,
+ };
+}
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index f164e4b..7dd95db 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -11,14 +11,14 @@ import * as Core from '../core';
export class Embeddings extends APIResource {
/**
- * Generate OpenAI-compatible embeddings for the given input using the specified
- * model.
+ * Create embeddings. Generate OpenAI-compatible embeddings for the given input
+ * using the specified model.
*/
create(
body: EmbeddingCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise<CreateEmbeddingsResponse> {
- return this._client.post('/v1/openai/v1/embeddings', { body, ...options });
+ return this._client.post('/v1/embeddings', { body, ...options });
}
}
diff --git a/src/resources/files.ts b/src/resources/files.ts
index 2124638..e7abab7 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -13,25 +13,27 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../pagination';
export class Files extends APIResource {
/**
- * Upload a file that can be used across various endpoints. The file upload should
- * be a multipart form request with:
+ * Upload file. Upload a file that can be used across various endpoints.
+ *
+ * The file upload should be a multipart form request with:
*
* - file: The File object (not file name) to be uploaded.
* - purpose: The intended purpose of the uploaded file.
+ * - expires_after: Optional form values describing expiration for the file.
*/
create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise<File> {
- return this._client.post('/v1/openai/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
+ return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
}
/**
- * Returns information about a specific file.
+ * Retrieve file. Returns information about a specific file.
*/
retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise<File> {
- return this._client.get(`/v1/openai/v1/files/${fileId}`, options);
+ return this._client.get(`/v1/files/${fileId}`, options);
}
/**
- * Returns a list of files that belong to the user's organization.
+ * List files. Returns a list of files that belong to the user's organization.
*/
list(
query?: FileListParams,
options?: Core.RequestOptions,
): Core.PagePromise<FilesOpenAICursorPage, File>;
list(options?: Core.RequestOptions): Core.PagePromise<FilesOpenAICursorPage, File>;
@@ -42,21 +44,21 @@ export class Files extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList('/v1/openai/v1/files', FilesOpenAICursorPage, { query, ...options });
+ return this._client.getAPIList('/v1/files', FilesOpenAICursorPage, { query, ...options });
}
/**
- * Delete a file.
+ * Delete file.
*/
delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise<DeleteFileResponse> {
- return this._client.delete(`/v1/openai/v1/files/${fileId}`, options);
+ return this._client.delete(`/v1/files/${fileId}`, options);
}
/**
- * Returns the contents of the specified file.
+ * Retrieve file content. Returns the contents of the specified file.
*/
content(fileId: string, options?: Core.RequestOptions): Core.APIPromise<Response> {
- return this._client.get(`/v1/openai/v1/files/${fileId}/content`, options);
+ return this._client.get(`/v1/files/${fileId}/content`, options);
}
}
@@ -161,6 +163,28 @@ export interface FileCreateParams {
* Valid purpose values for OpenAI Files API.
*/
purpose: 'assistants' | 'batch';
+
+ /**
+ * Control expiration of uploaded files. Params:
+ *
+ * - anchor, must be "created_at"
+ * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ */
+ expires_after?: FileCreateParams.ExpiresAfter;
+}
+
+export namespace FileCreateParams {
+ /**
+ * Control expiration of uploaded files. Params:
+ *
+ * - anchor, must be "created_at"
+ * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ */
+ export interface ExpiresAfter {
+ anchor: 'created_at';
+
+ seconds: number;
+ }
}
export interface FileListParams extends OpenAICursorPageParams {
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 2710bf6..982639e 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -7,26 +7,8 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export * from './shared';
-export {
- Agents,
- type InferenceStep,
- type MemoryRetrievalStep,
- type ShieldCallStep,
- type ToolExecutionStep,
- type ToolResponse,
- type AgentCreateResponse,
- type AgentRetrieveResponse,
- type AgentListResponse,
- type AgentCreateParams,
- type AgentListParams,
-} from './agents/agents';
-export {
- Benchmarks,
- type Benchmark,
- type ListBenchmarksResponse,
- type BenchmarkListResponse,
- type BenchmarkRegisterParams,
-} from './benchmarks';
+export { Alpha } from './alpha/alpha';
+export { Beta } from './beta/beta';
export { Chat, type ChatCompletionChunk } from './chat/chat';
export {
Completions,
@@ -36,28 +18,13 @@ export {
type CompletionCreateParamsStreaming,
} from './completions';
export {
- Datasets,
- type ListDatasetsResponse,
- type DatasetRetrieveResponse,
- type DatasetListResponse,
- type DatasetIterrowsResponse,
- type DatasetRegisterResponse,
- type DatasetAppendrowsParams,
- type DatasetIterrowsParams,
- type DatasetRegisterParams,
-} from './datasets';
+ Conversations,
+ type ConversationObject,
+ type ConversationDeleteResponse,
+ type ConversationCreateParams,
+ type ConversationUpdateParams,
+} from './conversations/conversations';
export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings';
-export {
- Eval,
- type BenchmarkConfig,
- type EvalCandidate,
- type EvaluateResponse,
- type Job,
- type EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams,
- type EvalRunEvalAlphaParams,
-} from './eval/eval';
export {
FilesOpenAICursorPage,
Files,
@@ -68,25 +35,6 @@ export {
type FileCreateParams,
type FileListParams,
} from './files';
-export {
- Inference,
- type ChatCompletionResponseStreamChunk,
- type CompletionResponse,
- type EmbeddingsResponse,
- type TokenLogProbs,
- type InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse,
- type InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams,
- type InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams,
- type InferenceRerankParams,
-} from './inference';
export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect';
export {
Models,
@@ -97,13 +45,15 @@ export {
} from './models/models';
export { Moderations, type CreateResponse, type ModerationCreateParams } from './moderations';
export {
- PostTraining,
- type AlgorithmConfig,
- type ListPostTrainingJobsResponse,
- type PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams,
-} from './post-training/post-training';
+ Prompts,
+ type ListPromptsResponse,
+ type Prompt,
+ type PromptListResponse,
+ type PromptCreateParams,
+ type PromptRetrieveParams,
+ type PromptUpdateParams,
+ type PromptSetDefaultVersionParams,
+} from './prompts/prompts';
export { Providers, type ListProvidersResponse, type ProviderListResponse } from './providers';
export {
ResponseListResponsesOpenAICursorPage,
@@ -146,25 +96,6 @@ export {
type SyntheticDataGenerationResponse,
type SyntheticDataGenerationGenerateParams,
} from './synthetic-data-generation';
-export {
- Telemetry,
- type Event,
- type QueryCondition,
- type QuerySpansResponse,
- type SpanWithStatus,
- type Trace,
- type TelemetryGetSpanResponse,
- type TelemetryGetSpanTreeResponse,
- type TelemetryQueryMetricsResponse,
- type TelemetryQuerySpansResponse,
- type TelemetryQueryTracesResponse,
- type TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams,
- type TelemetryQueryMetricsParams,
- type TelemetryQuerySpansParams,
- type TelemetryQueryTracesParams,
- type TelemetrySaveSpansToDatasetParams,
-} from './telemetry';
export {
ToolRuntime,
type ToolDef,
@@ -180,21 +111,7 @@ export {
type ToolgroupListResponse,
type ToolgroupRegisterParams,
} from './toolgroups';
-export {
- Tools,
- type ListToolsResponse,
- type Tool,
- type ToolListResponse,
- type ToolListParams,
-} from './tools';
-export {
- VectorDBs,
- type ListVectorDBsResponse,
- type VectorDBRetrieveResponse,
- type VectorDBListResponse,
- type VectorDBRegisterResponse,
- type VectorDBRegisterParams,
-} from './vector-dbs';
+export { Tools, type ToolListResponse, type ToolListParams } from './tools';
export {
VectorIo,
type QueryChunksResponse,
diff --git a/src/resources/inference.ts b/src/resources/inference.ts
deleted file mode 100644
index b262392..0000000
--- a/src/resources/inference.ts
+++ /dev/null
@@ -1,768 +0,0 @@
-// Copyright (c) Meta Platforms, Inc. and affiliates.
-// All rights reserved.
-//
-// This source code is licensed under the terms described in the LICENSE file in
-// the root directory of this source tree.
-
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import { APIPromise } from '../core';
-import * as Core from '../core';
-import * as InferenceAPI from './inference';
-import * as Shared from './shared';
-import { Stream } from '../streaming';
-
-export class Inference extends APIResource {
- /**
- * Generate chat completions for a batch of messages using the specified model.
- */
- batchChatCompletion(
- body: InferenceBatchChatCompletionParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<InferenceBatchChatCompletionResponse> {
- return this._client.post('/v1/inference/batch-chat-completion', { body, ...options });
- }
-
- /**
- * Generate completions for a batch of content using the specified model.
- */
- batchCompletion(
- body: InferenceBatchCompletionParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<Shared.BatchCompletion> {
- return this._client.post('/v1/inference/batch-completion', { body, ...options });
- }
-
- /**
- * Generate a chat completion for the given messages using the specified model.
- *
- * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions.
- */
- chatCompletion(
- body: InferenceChatCompletionParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Shared.ChatCompletionResponse>;
- chatCompletion(
- body: InferenceChatCompletionParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<ChatCompletionResponseStreamChunk>>;
- chatCompletion(
- body: InferenceChatCompletionParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<ChatCompletionResponseStreamChunk> | Shared.ChatCompletionResponse>;
- chatCompletion(
- body: InferenceChatCompletionParams,
- options?: Core.RequestOptions,
- ): APIPromise<Shared.ChatCompletionResponse> | APIPromise<Stream<ChatCompletionResponseStreamChunk>> {
- return this._client.post('/v1/inference/chat-completion', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<Shared.ChatCompletionResponse> | APIPromise<Stream<ChatCompletionResponseStreamChunk>>;
- }
-
- /**
- * Generate a completion for the given content using the specified model.
- *
- * @deprecated /v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.
- */
- completion(
- body: InferenceCompletionParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<CompletionResponse>;
- completion(
- body: InferenceCompletionParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<CompletionResponse>>;
- completion(
- body: InferenceCompletionParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<CompletionResponse> | CompletionResponse>;
- completion(
- body: InferenceCompletionParams,
- options?: Core.RequestOptions,
- ): APIPromise<CompletionResponse> | APIPromise<Stream<CompletionResponse>> {
- return this._client.post('/v1/inference/completion', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<CompletionResponse> | APIPromise<Stream<CompletionResponse>>;
- }
-
- /**
- * Generate embeddings for content pieces using the specified model.
- *
- * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.
- */
- embeddings(
- body: InferenceEmbeddingsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<EmbeddingsResponse> {
- return this._client.post('/v1/inference/embeddings', { body, ...options });
- }
-
- /**
- * Rerank a list of documents based on their relevance to a query.
- */
- rerank(
- body: InferenceRerankParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<InferenceRerankResponse> {
- return (
- this._client.post('/v1/inference/rerank', { body, ...options }) as Core.APIPromise<{
- data: InferenceRerankResponse;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-}
-
-/**
- * A chunk of a streamed chat completion response.
- */
-export interface ChatCompletionResponseStreamChunk {
- /**
- * The event containing the new content
- */
- event: ChatCompletionResponseStreamChunk.Event;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array<Shared.Metric>;
-}
-
-export namespace ChatCompletionResponseStreamChunk {
- /**
- * The event containing the new content
- */
- export interface Event {
- /**
- * Content generated since last event. This can be one or more tokens, or a tool
- * call.
- */
- delta: Shared.ContentDelta;
-
- /**
- * Type of the event
- */
- event_type: 'start' | 'complete' | 'progress';
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array<InferenceAPI.TokenLogProbs>;
-
- /**
- * Optional reason why generation stopped, if complete
- */
- stop_reason?: 'end_of_turn' | 'end_of_message' | 'out_of_tokens';
- }
-}
-
-/**
- * Response from a completion request.
- */
-export interface CompletionResponse {
- /**
- * The generated completion text
- */
- content: string;
-
- /**
- * Reason why generation stopped
- */
- stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens';
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array<TokenLogProbs>;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array<Shared.Metric>;
-}
-
-/**
- * Response containing generated embeddings.
- */
-export interface EmbeddingsResponse {
- /**
- * List of embedding vectors, one per input content. Each embedding is a list of
- * floats. The dimensionality of the embedding is model-specific; you can check
- * model metadata using /models/{model_id}
- */
- embeddings: Array<Array<number>>;
-}
-
-/**
- * Log probabilities for generated tokens.
- */
-export interface TokenLogProbs {
- /**
- * Dictionary mapping tokens to their log probabilities
- */
- logprobs_by_token: { [key: string]: number };
-}
-
-/**
- * Response from a batch chat completion request.
- */
-export interface InferenceBatchChatCompletionResponse {
- /**
- * List of chat completion responses, one for each conversation in the batch
- */
- batch: Array<Shared.ChatCompletionResponse>;
-}
-
-/**
- * List of rerank result objects, sorted by relevance score (descending)
- */
-export type InferenceRerankResponse = Array<InferenceRerankResponse.InferenceRerankResponseItem>;
-
-export namespace InferenceRerankResponse {
- /**
- * A single rerank result from a reranking response.
- */
- export interface InferenceRerankResponseItem {
- /**
- * The original index of the document in the input list
- */
- index: number;
-
- /**
- * The relevance score from the model output. Values are inverted when applicable
- * so that higher scores indicate greater relevance.
- */
- relevance_score: number;
- }
-}
-
-export interface InferenceBatchChatCompletionParams {
- /**
- * The messages to generate completions for.
- */
- messages_batch: Array<Array<Shared.Message>>;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceBatchChatCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) Configuration for tool use.
- */
- tool_config?: InferenceBatchChatCompletionParams.ToolConfig;
-
- /**
- * (Optional) List of tool definitions available to the model.
- */
- tools?: Array<InferenceBatchChatCompletionParams.Tool>;
-}
-
-export namespace InferenceBatchChatCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- /**
- * (Optional) Configuration for tool use.
- */
- export interface ToolConfig {
- /**
- * (Optional) Config for how to override the default system prompt. -
- * `SystemMessageBehavior.append`: Appends the provided system message to the
- * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default
- * system prompt with the provided system message. The system message can include
- * the string '{{function_definitions}}' to indicate where the function definitions
- * should be inserted.
- */
- system_message_behavior?: 'append' | 'replace';
-
- /**
- * (Optional) Whether tool use is automatic, required, or none. Can also specify a
- * tool name to use a specific tool. Defaults to ToolChoice.auto.
- */
- tool_choice?: 'auto' | 'required' | 'none' | (string & {});
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * <function=function_name> tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
- }
-
- export interface Tool {
- tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- description?: string;
-
- parameters?: { [key: string]: Shared.ToolParamDefinition };
- }
-}
-
-export interface InferenceBatchCompletionParams {
- /**
- * The content to generate completions for.
- */
- content_batch: Array<Shared.InterleavedContent>;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceBatchCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-}
-
-export namespace InferenceBatchCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-}
-
-export type InferenceChatCompletionParams =
- | InferenceChatCompletionParamsNonStreaming
- | InferenceChatCompletionParamsStreaming;
-
-export interface InferenceChatCompletionParamsBase {
- /**
- * List of messages in the conversation.
- */
- messages: Array<Shared.Message>;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceChatCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding. There are two
- * options: - `ResponseFormat.json_schema`: The grammar is a JSON schema. Most
- * providers support this format. - `ResponseFormat.grammar`: The grammar is a BNF
- * grammar. This format is more flexible, but not all providers support it.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: boolean;
-
- /**
- * (Optional) Whether tool use is required or automatic. Defaults to
- * ToolChoice.auto. .. deprecated:: Use tool_config instead.
- */
- tool_choice?: 'auto' | 'required' | 'none';
-
- /**
- * (Optional) Configuration for tool use.
- */
- tool_config?: InferenceChatCompletionParams.ToolConfig;
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * <function=function_name> tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls. .. deprecated:: Use
- * tool_config instead.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
-
- /**
- * (Optional) List of tool definitions available to the model.
- */
- tools?: Array<InferenceChatCompletionParams.Tool>;
-}
-
-export namespace InferenceChatCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- /**
- * (Optional) Configuration for tool use.
- */
- export interface ToolConfig {
- /**
- * (Optional) Config for how to override the default system prompt. -
- * `SystemMessageBehavior.append`: Appends the provided system message to the
- * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default
- * system prompt with the provided system message. The system message can include
- * the string '{{function_definitions}}' to indicate where the function definitions
- * should be inserted.
- */
- system_message_behavior?: 'append' | 'replace';
-
- /**
- * (Optional) Whether tool use is automatic, required, or none. Can also specify a
- * tool name to use a specific tool. Defaults to ToolChoice.auto.
- */
- tool_choice?: 'auto' | 'required' | 'none' | (string & {});
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * <function=function_name> tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
- }
-
- export interface Tool {
- tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- description?: string;
-
- parameters?: { [key: string]: Shared.ToolParamDefinition };
- }
-
- export type InferenceChatCompletionParamsNonStreaming =
- InferenceAPI.InferenceChatCompletionParamsNonStreaming;
- export type InferenceChatCompletionParamsStreaming = InferenceAPI.InferenceChatCompletionParamsStreaming;
-}
-
-export interface InferenceChatCompletionParamsNonStreaming extends InferenceChatCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: false;
-}
-
-export interface InferenceChatCompletionParamsStreaming extends InferenceChatCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream: true;
-}
-
-export type InferenceCompletionParams =
- | InferenceCompletionParamsNonStreaming
- | InferenceCompletionParamsStreaming;
-
-export interface InferenceCompletionParamsBase {
- /**
- * The content to generate a completion for.
- */
- content: Shared.InterleavedContent;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: boolean;
-}
-
-export namespace InferenceCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- export type InferenceCompletionParamsNonStreaming = InferenceAPI.InferenceCompletionParamsNonStreaming;
- export type InferenceCompletionParamsStreaming = InferenceAPI.InferenceCompletionParamsStreaming;
-}
-
-export interface InferenceCompletionParamsNonStreaming extends InferenceCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: false;
-}
-
-export interface InferenceCompletionParamsStreaming extends InferenceCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream: true;
-}
-
-export interface InferenceEmbeddingsParams {
- /**
- * List of contents to generate embeddings for. Each content can be a string or an
- * InterleavedContentItem (and hence can be multimodal). The behavior depends on
- * the model and provider. Some models may only support text.
- */
- contents: Array<string> | Array<Shared.InterleavedContentItem>;
-
- /**
- * The identifier of the model to use. The model must be an embedding model
- * registered with Llama Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) Output dimensionality for the embeddings. Only supported by
- * Matryoshka models.
- */
- output_dimension?: number;
-
- /**
- * (Optional) How is the embedding being used? This is only supported by asymmetric
- * embedding models.
- */
- task_type?: 'query' | 'document';
-
- /**
- * (Optional) Config for how to truncate text for embedding when text is longer
- * than the model's max sequence length.
- */
- text_truncation?: 'none' | 'start' | 'end';
-}
-
-export interface InferenceRerankParams {
- /**
- * List of items to rerank. Each item can be a string, text content part, or image
- * content part. Each input must not exceed the model's max input token length.
- */
- items: Array<
- | string
- | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
- | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam
- >;
-
- /**
- * The identifier of the reranking model to use.
- */
- model: string;
-
- /**
- * The search query to rank items against. Can be a string, text content part, or
- * image content part. The input must not exceed the model's max input token
- * length.
- */
- query:
- | string
- | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
- | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam;
-
- /**
- * (Optional) Maximum number of results to return. Default: returns all.
- */
- max_num_results?: number;
-}
-
-export namespace InferenceRerankParams {
- /**
- * Text content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartTextParam {
- /**
- * The text content of the message
- */
- text: string;
-
- /**
- * Must be "text" to identify this as text content
- */
- type: 'text';
- }
-
- /**
- * Image content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- /**
- * Must be "image_url" to identify this as image content
- */
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- export interface ImageURL {
- /**
- * URL of the image to include in the message
- */
- url: string;
-
- /**
- * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
- */
- detail?: string;
- }
- }
-
- /**
- * Text content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartTextParam {
- /**
- * The text content of the message
- */
- text: string;
-
- /**
- * Must be "text" to identify this as text content
- */
- type: 'text';
- }
-
- /**
- * Image content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- /**
- * Must be "image_url" to identify this as image content
- */
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- export interface ImageURL {
- /**
- * URL of the image to include in the message
- */
- url: string;
-
- /**
- * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
- */
- detail?: string;
- }
- }
-}
-
-export declare namespace Inference {
- export {
- type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk,
- type CompletionResponse as CompletionResponse,
- type EmbeddingsResponse as EmbeddingsResponse,
- type TokenLogProbs as TokenLogProbs,
- type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse as InferenceRerankResponse,
- type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams as InferenceBatchCompletionParams,
- type InferenceChatCompletionParams as InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams as InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams as InferenceEmbeddingsParams,
- type InferenceRerankParams as InferenceRerankParams,
- };
-}
diff --git a/src/resources/inspect.ts b/src/resources/inspect.ts
index ebe2979..b9c26ac 100644
--- a/src/resources/inspect.ts
+++ b/src/resources/inspect.ts
@@ -11,14 +11,14 @@ import * as Core from '../core';
export class Inspect extends APIResource {
/**
- * Get the current health status of the service.
+ * Get health status. Get the current health status of the service.
*/
health(options?: Core.RequestOptions): Core.APIPromise<HealthInfo> {
return this._client.get('/v1/health', options);
}
/**
- * Get the version of the service.
+ * Get version. Get the version of the service.
*/
version(options?: Core.RequestOptions): Core.APIPromise<VersionInfo> {
return this._client.get('/v1/version', options);
diff --git a/src/resources/models/index.ts b/src/resources/models/index.ts
index 6b5fa61..f06b472 100644
--- a/src/resources/models/index.ts
+++ b/src/resources/models/index.ts
@@ -13,4 +13,4 @@ export {
type ModelListResponse,
type ModelRegisterParams,
} from './models';
-export { OpenAI, type OpenAIListResponse } from './openai';
+export { OpenAI } from './openai';
diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts
index 4db5c01..d76baa2 100644
--- a/src/resources/models/models.ts
+++ b/src/resources/models/models.ts
@@ -9,13 +9,13 @@
import { APIResource } from '../../resource';
import * as Core from '../../core';
import * as OpenAIAPI from './openai';
-import { OpenAI, OpenAIListResponse } from './openai';
+import { OpenAI } from './openai';
export class Models extends APIResource {
openai: OpenAIAPI.OpenAI = new OpenAIAPI.OpenAI(this._client);
/**
- * Get a model by its identifier.
+ * Get model. Get a model by its identifier.
*/
retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise<Model> {
return this._client.get(`/v1/models/${modelId}`, options);
@@ -31,14 +31,14 @@ export class Models extends APIResource {
}
/**
- * Register a model.
+ * Register model. Register a model.
*/
register(body: ModelRegisterParams, options?: Core.RequestOptions): Core.APIPromise<Model> {
return this._client.post('/v1/models', { body, ...options });
}
/**
- * Unregister a model.
+ * Unregister model. Unregister a model.
*/
unregister(modelId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
return this._client.delete(`/v1/models/${modelId}`, {
@@ -69,7 +69,7 @@ export interface Model {
/**
* The type of model (LLM or embedding model)
*/
- model_type: 'llm' | 'embedding';
+ model_type: 'llm' | 'embedding' | 'rerank';
/**
* ID of the provider that owns this resource
@@ -103,7 +103,7 @@ export interface ModelRegisterParams {
/**
* The type of model to register.
*/
- model_type?: 'llm' | 'embedding';
+ model_type?: 'llm' | 'embedding' | 'rerank';
/**
* The identifier of the provider.
@@ -126,5 +126,5 @@ export declare namespace Models {
type ModelRegisterParams as ModelRegisterParams,
};
- export { OpenAI as OpenAI, type OpenAIListResponse as OpenAIListResponse };
+ export { OpenAI as OpenAI };
}
diff --git a/src/resources/models/openai.ts b/src/resources/models/openai.ts
index 01155ee..589acdf 100644
--- a/src/resources/models/openai.ts
+++ b/src/resources/models/openai.ts
@@ -8,35 +8,15 @@
import { APIResource } from '../../resource';
import * as Core from '../../core';
+import * as ModelsAPI from './models';
export class OpenAI extends APIResource {
/**
- * List models using the OpenAI API.
+ * List all models.
*/
- list(options?: Core.RequestOptions): Core.APIPromise<OpenAIListResponse> {
+ list(options?: Core.RequestOptions): Core.APIPromise<ModelsAPI.ModelListResponse> {
return (
- this._client.get('/v1/openai/v1/models', options) as Core.APIPromise<{ data: OpenAIListResponse }>
+ this._client.get('/v1/models', options) as Core.APIPromise<{ data: ModelsAPI.ModelListResponse }>
)._thenUnwrap((obj) => obj.data);
}
}
-
-export type OpenAIListResponse = Array<OpenAIListResponse.OpenAIListResponseItem>;
-
-export namespace OpenAIListResponse {
- /**
- * A model from OpenAI.
- */
- export interface OpenAIListResponseItem {
- id: string;
-
- created: number;
-
- object: 'model';
-
- owned_by: string;
- }
-}
-
-export declare namespace OpenAI {
- export { type OpenAIListResponse as OpenAIListResponse };
-}
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index 2d27db1..29377d4 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -11,10 +11,11 @@ import * as Core from '../core';
export class Moderations extends APIResource {
/**
- * Classifies if text and/or image inputs are potentially harmful.
+ * Create moderation. Classifies if text and/or image inputs are potentially
+ * harmful.
*/
create(body: ModerationCreateParams, options?: Core.RequestOptions): Core.APIPromise<CreateResponse> {
- return this._client.post('/v1/openai/v1/moderations', { body, ...options });
+ return this._client.post('/v1/moderations', { body, ...options });
}
}
@@ -77,9 +78,9 @@ export interface ModerationCreateParams {
input: string | Array<string>;
/**
- * The content moderation model you would like to use.
+ * (Optional) The content moderation model you would like to use.
*/
- model: string;
+ model?: string;
}
export declare namespace Moderations {
diff --git a/src/resources/prompts.ts b/src/resources/prompts.ts
new file mode 100644
index 0000000..3ebc77f
--- /dev/null
+++ b/src/resources/prompts.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './prompts/index';
diff --git a/src/resources/prompts/index.ts b/src/resources/prompts/index.ts
new file mode 100644
index 0000000..77a24d7
--- /dev/null
+++ b/src/resources/prompts/index.ts
@@ -0,0 +1,13 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ Prompts,
+ type ListPromptsResponse,
+ type Prompt,
+ type PromptListResponse,
+ type PromptCreateParams,
+ type PromptRetrieveParams,
+ type PromptUpdateParams,
+ type PromptSetDefaultVersionParams,
+} from './prompts';
+export { Versions } from './versions';
diff --git a/src/resources/prompts/prompts.ts b/src/resources/prompts/prompts.ts
new file mode 100644
index 0000000..6dca328
--- /dev/null
+++ b/src/resources/prompts/prompts.ts
@@ -0,0 +1,181 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as VersionsAPI from './versions';
+import { Versions } from './versions';
+
+export class Prompts extends APIResource {
+ versions: VersionsAPI.Versions = new VersionsAPI.Versions(this._client);
+
+ /**
+ * Create prompt. Create a new prompt.
+ */
+ create(body: PromptCreateParams, options?: Core.RequestOptions): Core.APIPromise<Prompt> {
+ return this._client.post('/v1/prompts', { body, ...options });
+ }
+
+ /**
+ * Get prompt. Get a prompt by its identifier and optional version.
+ */
+ retrieve(
+ promptId: string,
+ query?: PromptRetrieveParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<Prompt>;
+ retrieve(promptId: string, options?: Core.RequestOptions): Core.APIPromise<Prompt>;
+ retrieve(
+ promptId: string,
+ query: PromptRetrieveParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<Prompt> {
+ if (isRequestOptions(query)) {
+ return this.retrieve(promptId, {}, query);
+ }
+ return this._client.get(`/v1/prompts/${promptId}`, { query, ...options });
+ }
+
+ /**
+ * Update prompt. Update an existing prompt (increments version).
+ */
+ update(promptId: string, body: PromptUpdateParams, options?: Core.RequestOptions): Core.APIPromise<Prompt> {
+ return this._client.post(`/v1/prompts/${promptId}`, { body, ...options });
+ }
+
+ /**
+ * List all prompts.
+ */
+ list(options?: Core.RequestOptions): Core.APIPromise<PromptListResponse> {
+ return (
+ this._client.get('/v1/prompts', options) as Core.APIPromise<{ data: PromptListResponse }>
+ )._thenUnwrap((obj) => obj.data);
+ }
+
+ /**
+ * Delete prompt. Delete a prompt.
+ */
+ delete(promptId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
+ return this._client.delete(`/v1/prompts/${promptId}`, {
+ ...options,
+ headers: { Accept: '*/*', ...options?.headers },
+ });
+ }
+
+ /**
+ * Set prompt version. Set which version of a prompt should be the default in
+ * get_prompt (latest).
+ */
+ setDefaultVersion(
+ promptId: string,
+ body: PromptSetDefaultVersionParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<Prompt> {
+ return this._client.post(`/v1/prompts/${promptId}/set-default-version`, { body, ...options });
+ }
+}
+
+/**
+ * Response model to list prompts.
+ */
+export interface ListPromptsResponse {
+ data: PromptListResponse;
+}
+
+/**
+ * A prompt resource representing a stored OpenAI Compatible prompt template in
+ * Llama Stack.
+ */
+export interface Prompt {
+ /**
+ * Boolean indicating whether this version is the default version for this prompt
+ */
+ is_default: boolean;
+
+ /**
+ * Unique identifier formatted as 'pmpt\_<48-digit-hash>'
+ */
+ prompt_id: string;
+
+ /**
+ * List of prompt variable names that can be used in the prompt template
+ */
+ variables: Array<string>;
+
+ /**
+ * Version (integer starting at 1, incremented on save)
+ */
+ version: number;
+
+ /**
+ * The system prompt text with variable placeholders. Variables are only supported
+ * when using the Responses API.
+ */
+ prompt?: string;
+}
+
+export type PromptListResponse = Array<Prompt>;
+
+export interface PromptCreateParams {
+ /**
+ * The prompt text content with variable placeholders.
+ */
+ prompt: string;
+
+ /**
+ * List of variable names that can be used in the prompt template.
+ */
+ variables?: Array<string>;
+}
+
+export interface PromptRetrieveParams {
+ /**
+ * The version of the prompt to get (defaults to latest).
+ */
+ version?: number;
+}
+
+export interface PromptUpdateParams {
+ /**
+ * The updated prompt text content.
+ */
+ prompt: string;
+
+ /**
+ * Set the new version as the default (default=True).
+ */
+ set_as_default: boolean;
+
+ /**
+ * The current version of the prompt being updated.
+ */
+ version: number;
+
+ /**
+ * Updated list of variable names that can be used in the prompt template.
+ */
+ variables?: Array<string>;
+}
+
+export interface PromptSetDefaultVersionParams {
+ /**
+ * The version to set as default.
+ */
+ version: number;
+}
+
+Prompts.Versions = Versions;
+
+export declare namespace Prompts {
+ export {
+ type ListPromptsResponse as ListPromptsResponse,
+ type Prompt as Prompt,
+ type PromptListResponse as PromptListResponse,
+ type PromptCreateParams as PromptCreateParams,
+ type PromptRetrieveParams as PromptRetrieveParams,
+ type PromptUpdateParams as PromptUpdateParams,
+ type PromptSetDefaultVersionParams as PromptSetDefaultVersionParams,
+ };
+
+ export { Versions as Versions };
+}
diff --git a/src/resources/prompts/versions.ts b/src/resources/prompts/versions.ts
new file mode 100644
index 0000000..da2931b
--- /dev/null
+++ b/src/resources/prompts/versions.ts
@@ -0,0 +1,18 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
+import * as PromptsAPI from './prompts';
+
+export class Versions extends APIResource {
+ /**
+ * List prompt versions. List all versions of a specific prompt.
+ */
+ list(promptId: string, options?: Core.RequestOptions): Core.APIPromise<PromptsAPI.PromptListResponse> {
+ return (
+ this._client.get(`/v1/prompts/${promptId}/versions`, options) as Core.APIPromise<{
+ data: PromptsAPI.PromptListResponse;
+ }>
+ )._thenUnwrap((obj) => obj.data);
+ }
+}
diff --git a/src/resources/providers.ts b/src/resources/providers.ts
index da48e0a..c4ded28 100644
--- a/src/resources/providers.ts
+++ b/src/resources/providers.ts
@@ -12,14 +12,14 @@ import * as InspectAPI from './inspect';
export class Providers extends APIResource {
/**
- * Get detailed information about a specific provider.
+ * Get provider. Get detailed information about a specific provider.
*/
retrieve(providerId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.get(`/v1/providers/${providerId}`, options);
}
/**
- * List all available providers.
+ * List providers. List all available providers.
*/
list(options?: Core.RequestOptions): Core.APIPromise<ProviderListResponse> {
return (
diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts
index 99c6323..940dc23 100644
--- a/src/resources/responses/input-items.ts
+++ b/src/resources/responses/input-items.ts
@@ -12,7 +12,7 @@ import * as Core from '../../core';
export class InputItems extends APIResource {
/**
- * List input items for a given OpenAI response.
+ * List input items.
*/
list(
responseId: string,
@@ -28,7 +28,7 @@ export class InputItems extends APIResource {
if (isRequestOptions(query)) {
return this.list(responseId, {}, query);
}
- return this._client.get(`/v1/openai/v1/responses/${responseId}/input_items`, { query, ...options });
+ return this._client.get(`/v1/responses/${responseId}/input_items`, { query, ...options });
}
}
@@ -40,10 +40,15 @@ export interface InputItemListResponse {
* List of input items
*/
data: Array<
+ | InputItemListResponse.OpenAIResponseMessage
| InputItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall
| InputItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall
| InputItemListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | InputItemListResponse.OpenAIResponseOutputMessageMcpCall
+ | InputItemListResponse.OpenAIResponseOutputMessageMcpListTools
+ | InputItemListResponse.OpenAIResponseMcpApprovalRequest
| InputItemListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | InputItemListResponse.OpenAIResponseMcpApprovalResponse
| InputItemListResponse.OpenAIResponseMessage
>;
@@ -54,6 +59,176 @@ export interface InputItemListResponse {
}
export namespace InputItemListResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
/**
* Web search tool call output message for OpenAI responses.
*/
@@ -171,6 +346,108 @@ export namespace InputItemListResponse {
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* This represents the output of a function call that gets passed back to the
* model.
@@ -187,6 +464,21 @@ export namespace InputItemListResponse {
status?: string;
}
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
/**
* Corresponds to the various Message types in the Responses API. They are all
* under one type because the Responses API gives them all the same "type" value,
@@ -199,7 +491,10 @@ export namespace InputItemListResponse {
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
>
- | Array<OpenAIResponseMessage.UnionMember2>;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -246,12 +541,12 @@ export namespace InputItemListResponse {
image_url?: string;
}
- export interface UnionMember2 {
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -259,7 +554,7 @@ export namespace InputItemListResponse {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -337,6 +632,21 @@ export namespace InputItemListResponse {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
}
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index 736846b..fc7bad4 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -20,7 +20,7 @@ export class Responses extends APIResource {
inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
/**
- * Create a new OpenAI response.
+ * Create a model response.
*/
create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<ResponseObject>;
create(
@@ -35,22 +35,20 @@ export class Responses extends APIResource {
body: ResponseCreateParams,
options?: Core.RequestOptions,
): APIPromise<ResponseObject> | APIPromise<Stream<ResponseObjectStream>> {
- return this._client.post('/v1/openai/v1/responses', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise<ResponseObject> | APIPromise<Stream<ResponseObjectStream>>;
+ return this._client.post('/v1/responses', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<ResponseObject>
+ | APIPromise<Stream<ResponseObjectStream>>;
}
/**
- * Retrieve an OpenAI response by its ID.
+ * Get a model response.
*/
retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise<ResponseObject> {
- return this._client.get(`/v1/openai/v1/responses/${responseId}`, options);
+ return this._client.get(`/v1/responses/${responseId}`, options);
}
/**
- * List all OpenAI responses.
+ * List all responses.
*/
list(
query?: ResponseListParams,
@@ -66,17 +64,17 @@ export class Responses extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList('/v1/openai/v1/responses', ResponseListResponsesOpenAICursorPage, {
+ return this._client.getAPIList('/v1/responses', ResponseListResponsesOpenAICursorPage, {
query,
...options,
});
}
/**
- * Delete an OpenAI response by its ID.
+ * Delete a response.
*/
delete(responseId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/openai/v1/responses/${responseId}`, options);
+ return this._client.delete(`/v1/responses/${responseId}`, options);
}
}
@@ -116,6 +114,7 @@ export interface ResponseObject {
| ResponseObject.OpenAIResponseOutputMessageFunctionToolCall
| ResponseObject.OpenAIResponseOutputMessageMcpCall
| ResponseObject.OpenAIResponseOutputMessageMcpListTools
+ | ResponseObject.OpenAIResponseMcpApprovalRequest
>;
/**
@@ -138,6 +137,11 @@ export interface ResponseObject {
*/
error?: ResponseObject.Error;
+ /**
+ * (Optional) System message inserted into the model's context
+ */
+ instructions?: string;
+
/**
* (Optional) ID of the previous response in a conversation
*/
@@ -148,6 +152,16 @@ export interface ResponseObject {
*/
temperature?: number;
+ /**
+ * (Optional) An array of tools the model may call while generating a response.
+ */
+ tools?: Array<
+ | ResponseObject.OpenAIResponseInputToolWebSearch
+ | ResponseObject.OpenAIResponseInputToolFileSearch
+ | ResponseObject.OpenAIResponseInputToolFunction
+ | ResponseObject.OpenAIResponseToolMcp
+ >;
+
/**
* (Optional) Nucleus sampling parameter used for generation
*/
@@ -159,9 +173,9 @@ export interface ResponseObject {
truncation?: string;
/**
- * (Optional) User identifier associated with the request
+ * (Optional) Token usage information for the response
*/
- user?: string;
+ usage?: ResponseObject.Usage;
}
export namespace ResponseObject {
@@ -177,7 +191,10 @@ export namespace ResponseObject {
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
>
- | Array<OpenAIResponseMessage.UnionMember2>;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -224,12 +241,12 @@ export namespace ResponseObject {
image_url?: string;
}
- export interface UnionMember2 {
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -237,7 +254,7 @@ export namespace ResponseObject {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -315,6 +332,21 @@ export namespace ResponseObject {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
/**
@@ -521,6 +553,21 @@ export namespace ResponseObject {
}
}
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* Text formatting configuration for the response
*/
@@ -579,6 +626,182 @@ export namespace ResponseObject {
*/
message: string;
}
+
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolWebSearch {
+ /**
+ * Web search tool type variant to use
+ */
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+ /**
+ * (Optional) Size of search context, must be "low", "medium", or "high"
+ */
+ search_context_size?: string;
+ }
+
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFileSearch {
+ /**
+ * Tool type identifier, always "file_search"
+ */
+ type: 'file_search';
+
+ /**
+ * List of vector store identifiers to search within
+ */
+ vector_store_ids: Array<string>;
+
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
+ max_num_results?: number;
+
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+ }
+
+ export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ export interface RankingOptions {
+ /**
+ * (Optional) Name of the ranking algorithm to use
+ */
+ ranker?: string;
+
+ /**
+ * (Optional) Minimum relevance score threshold for results
+ */
+ score_threshold?: number;
+ }
+ }
+
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
+ */
+ name: string;
+
+ /**
+ * Tool type identifier, always "function"
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Description of what the function does
+ */
+ description?: string;
+
+ /**
+ * (Optional) JSON schema defining the function's parameters
+ */
+ parameters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Whether to enforce strict parameter validation
+ */
+ strict?: boolean;
+ }
+
+ /**
+ * Model Context Protocol (MCP) tool configuration for OpenAI response object.
+ */
+ export interface OpenAIResponseToolMcp {
+ /**
+ * Label to identify this MCP server
+ */
+ server_label: string;
+
+ /**
+ * Tool type identifier, always "mcp"
+ */
+ type: 'mcp';
+
+ /**
+ * (Optional) Restriction on which tools can be used from this server
+ */
+ allowed_tools?: Array<string> | OpenAIResponseToolMcp.AllowedToolsFilter;
+ }
+
+ export namespace OpenAIResponseToolMcp {
+ /**
+ * Filter configuration for restricting which MCP tools can be used.
+ */
+ export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
+ tool_names?: Array<string>;
+ }
+ }
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the input
+ */
+ input_tokens: number;
+
+ /**
+ * Number of tokens in the output
+ */
+ output_tokens: number;
+
+ /**
+ * Total tokens used (input + output)
+ */
+ total_tokens: number;
+
+ /**
+ * Detailed breakdown of input token usage
+ */
+ input_tokens_details?: Usage.InputTokensDetails;
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ output_tokens_details?: Usage.OutputTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Detailed breakdown of input token usage
+ */
+ export interface InputTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ export interface OutputTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+ }
}
/**
@@ -586,6 +809,7 @@ export namespace ResponseObject {
*/
export type ResponseObjectStream =
| ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseInProgress
| ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded
| ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone
| ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta
@@ -605,6 +829,20 @@ export type ResponseObjectStream =
| ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted
| ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded
| ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallInProgress
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallSearching
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallCompleted
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseIncomplete
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFailed
| ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted;
export namespace ResponseObjectStream {
@@ -613,7 +851,7 @@ export namespace ResponseObjectStream {
*/
export interface OpenAIResponseObjectStreamResponseCreated {
/**
- * The newly created response object
+ * The response object that was created
*/
response: ResponsesAPI.ResponseObject;
@@ -623,6 +861,26 @@ export namespace ResponseObjectStream {
type: 'response.created';
}
+ /**
+ * Streaming event indicating the response remains in progress.
+ */
+ export interface OpenAIResponseObjectStreamResponseInProgress {
+ /**
+ * Current response state while in progress
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.in_progress"
+ */
+ type: 'response.in_progress';
+ }
+
/**
* Streaming event for when a new output item is added to the response.
*/
@@ -636,7 +894,8 @@ export namespace ResponseObjectStream {
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall
| OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools;
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMcpApprovalRequest;
/**
* Index position of this item in the output list
@@ -672,7 +931,10 @@ export namespace ResponseObjectStream {
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
>
- | Array<OpenAIResponseMessage.UnionMember2>;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -719,12 +981,12 @@ export namespace ResponseObjectStream {
image_url?: string;
}
- export interface UnionMember2 {
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -732,7 +994,7 @@ export namespace ResponseObjectStream {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -810,6 +1072,21 @@ export namespace ResponseObjectStream {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
/**
@@ -1015,6 +1292,21 @@ export namespace ResponseObjectStream {
description?: string;
}
}
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
}
/**
@@ -1030,7 +1322,8 @@ export namespace ResponseObjectStream {
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFileSearchToolCall
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFunctionToolCall
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpCall
- | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools;
+ | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools
+ | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseMcpApprovalRequest;
/**
* Index position of this item in the output list
@@ -1066,7 +1359,10 @@ export namespace ResponseObjectStream {
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
>
- | Array<OpenAIResponseMessage.UnionMember2>;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -1113,12 +1409,12 @@ export namespace ResponseObjectStream {
image_url?: string;
}
- export interface UnionMember2 {
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -1126,7 +1422,7 @@ export namespace ResponseObjectStream {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -1204,6 +1500,21 @@ export namespace ResponseObjectStream {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
/**
@@ -1409,6 +1720,21 @@ export namespace ResponseObjectStream {
description?: string;
}
}
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
}
/**
@@ -1702,17 +2028,28 @@ export namespace ResponseObjectStream {
* Streaming event for when a new content part is added to a response item.
*/
export interface OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
/**
* Unique identifier of the output item containing this content part
*/
item_id: string;
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
/**
* The content part that was added
*/
part:
| OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal;
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartReasoningText;
/**
* Unique identifier of the response containing this content
@@ -1731,34 +2068,172 @@ export namespace ResponseObjectStream {
}
export namespace OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Text content within a streamed response part.
+ */
export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
text: string;
+ /**
+ * Content part type identifier, always "output_text"
+ */
type: 'output_text';
+
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
+ }
+
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
}
+ /**
+ * Refusal content within a streamed response part.
+ */
export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
refusal: string;
+ /**
+ * Content part type identifier, always "refusal"
+ */
type: 'refusal';
}
+
+ /**
+ * Reasoning text emitted as part of a streamed response.
+ */
+ export interface OpenAIResponseContentPartReasoningText {
+ /**
+ * Reasoning text supplied by the model
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "reasoning_text"
+ */
+ type: 'reasoning_text';
+ }
}
/**
* Streaming event for when a content part is completed.
*/
export interface OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
/**
* Unique identifier of the output item containing this content part
*/
item_id: string;
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
/**
* The completed content part
*/
part:
| OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal;
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartReasoningText;
/**
* Unique identifier of the response containing this content
@@ -1777,129 +2252,999 @@ export namespace ResponseObjectStream {
}
export namespace OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Text content within a streamed response part.
+ */
export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
text: string;
+ /**
+ * Content part type identifier, always "output_text"
+ */
type: 'output_text';
+
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
}
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
refusal: string;
+ /**
+ * Content part type identifier, always "refusal"
+ */
type: 'refusal';
}
+
+ /**
+ * Reasoning text emitted as part of a streamed response.
+ */
+ export interface OpenAIResponseContentPartReasoningText {
+ /**
+ * Reasoning text supplied by the model
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "reasoning_text"
+ */
+ type: 'reasoning_text';
+ }
}
/**
- * Streaming event indicating a response has been completed.
+ * Streaming event for incremental reasoning text updates.
*/
- export interface OpenAIResponseObjectStreamResponseCompleted {
+ export interface OpenAIResponseObjectStreamResponseReasoningTextDelta {
/**
- * The completed response object
+ * Index position of the reasoning content part
*/
- response: ResponsesAPI.ResponseObject;
+ content_index: number;
/**
- * Event type identifier, always "response.completed"
+ * Incremental reasoning text being added
*/
- type: 'response.completed';
+ delta: string;
+
+ /**
+ * Unique identifier of the output item being updated
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_text.delta"
+ */
+ type: 'response.reasoning_text.delta';
}
-}
-/**
- * OpenAI response object extended with input context information.
- */
-export interface ResponseListResponse {
- /**
- * Unique identifier for this response
- */
- id: string;
+ /**
+ * Streaming event for when reasoning text is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningTextDone {
+ /**
+ * Index position of the reasoning content part
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the completed output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Final complete reasoning text
+ */
+ text: string;
+
+ /**
+ * Event type identifier, always "response.reasoning_text.done"
+ */
+ type: 'response.reasoning_text.done';
+ }
+
+ /**
+ * Streaming event for when a new reasoning summary part is added.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * The summary part that was added
+ */
+ part: OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded.Part;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_part.added"
+ */
+ type: 'response.reasoning_summary_part.added';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
+ /**
+ * The summary part that was added
+ */
+ export interface Part {
+ /**
+ * Summary text
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "summary_text"
+ */
+ type: 'summary_text';
+ }
+ }
+
+ /**
+ * Streaming event for when a reasoning summary part is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartDone {
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * The completed summary part
+ */
+ part: OpenAIResponseObjectStreamResponseReasoningSummaryPartDone.Part;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_part.done"
+ */
+ type: 'response.reasoning_summary_part.done';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartDone {
+ /**
+ * The completed summary part
+ */
+ export interface Part {
+ /**
+ * Summary text
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "summary_text"
+ */
+ type: 'summary_text';
+ }
+ }
+
+ /**
+ * Streaming event for incremental reasoning summary text updates.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta {
+ /**
+ * Incremental summary text being added
+ */
+ delta: string;
+
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_text.delta"
+ */
+ type: 'response.reasoning_summary_text.delta';
+ }
+
+ /**
+ * Streaming event for when reasoning summary text is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDone {
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Final complete summary text
+ */
+ text: string;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_text.done"
+ */
+ type: 'response.reasoning_summary_text.done';
+ }
+
+ /**
+ * Streaming event for incremental refusal text updates.
+ */
+ export interface OpenAIResponseObjectStreamResponseRefusalDelta {
+ /**
+ * Index position of the content part
+ */
+ content_index: number;
+
+ /**
+ * Incremental refusal text being added
+ */
+ delta: string;
+
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.refusal.delta"
+ */
+ type: 'response.refusal.delta';
+ }
+
+ /**
+ * Streaming event for when refusal text is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseRefusalDone {
+ /**
+ * Index position of the content part
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Final complete refusal text
+ */
+ refusal: string;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.refusal.done"
+ */
+ type: 'response.refusal.done';
+ }
+
+ /**
+ * Streaming event for when an annotation is added to output text.
+ */
+ export interface OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded {
+ /**
+ * The annotation object being added
+ */
+ annotation:
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFilePath;
+
+ /**
+ * Index of the annotation within the content part
+ */
+ annotation_index: number;
+
+ /**
+ * Index position of the content part within the output item
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the item to which the annotation is being added
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item in the response's output array
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.output_text.annotation.added"
+ */
+ type: 'response.output_text.annotation.added';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Streaming event for file search calls in progress.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallInProgress {
+ /**
+ * Unique identifier of the file search call
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.file_search_call.in_progress"
+ */
+ type: 'response.file_search_call.in_progress';
+ }
+
+ /**
+ * Streaming event for file search currently searching.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallSearching {
+ /**
+ * Unique identifier of the file search call
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.file_search_call.searching"
+ */
+ type: 'response.file_search_call.searching';
+ }
+
+ /**
+ * Streaming event for completed file search calls.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallCompleted {
+ /**
+ * Unique identifier of the completed file search call
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.file_search_call.completed"
+ */
+ type: 'response.file_search_call.completed';
+ }
+
+ /**
+ * Streaming event emitted when a response ends in an incomplete state.
+ */
+ export interface OpenAIResponseObjectStreamResponseIncomplete {
+ /**
+ * Response object describing the incomplete state
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.incomplete"
+ */
+ type: 'response.incomplete';
+ }
+
+ /**
+ * Streaming event emitted when a response fails.
+ */
+ export interface OpenAIResponseObjectStreamResponseFailed {
+ /**
+ * Response object describing the failure
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.failed"
+ */
+ type: 'response.failed';
+ }
+
+ /**
+ * Streaming event indicating a response has been completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseCompleted {
+ /**
+ * Completed response object
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Event type identifier, always "response.completed"
+ */
+ type: 'response.completed';
+ }
+}
+
+/**
+ * OpenAI response object extended with input context information.
+ */
+export interface ResponseListResponse {
+ /**
+ * Unique identifier for this response
+ */
+ id: string;
+
+ /**
+ * Unix timestamp when the response was created
+ */
+ created_at: number;
+
+ /**
+ * List of input items that led to this response
+ */
+ input: Array<
+ | ResponseListResponse.OpenAIResponseMessage
+ | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools
+ | ResponseListResponse.OpenAIResponseMcpApprovalRequest
+ | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ResponseListResponse.OpenAIResponseMcpApprovalResponse
+ | ResponseListResponse.OpenAIResponseMessage
+ >;
+
+ /**
+ * Model identifier used for generation
+ */
+ model: string;
+
+ /**
+ * Object type identifier, always "response"
+ */
+ object: 'response';
+
+ /**
+ * List of generated output items (messages, tool calls, etc.)
+ */
+ output: Array<
+ | ResponseListResponse.OpenAIResponseMessage
+ | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools
+ | ResponseListResponse.OpenAIResponseMcpApprovalRequest
+ >;
+
+ /**
+ * Whether tool calls can be executed in parallel
+ */
+ parallel_tool_calls: boolean;
+
+ /**
+ * Current status of the response generation
+ */
+ status: string;
+
+ /**
+ * Text formatting configuration for the response
+ */
+ text: ResponseListResponse.Text;
+
+ /**
+ * (Optional) Error details if the response generation failed
+ */
+ error?: ResponseListResponse.Error;
+
+ /**
+ * (Optional) System message inserted into the model's context
+ */
+ instructions?: string;
+
+ /**
+ * (Optional) ID of the previous response in a conversation
+ */
+ previous_response_id?: string;
+
+ /**
+ * (Optional) Sampling temperature used for generation
+ */
+ temperature?: number;
+
+ /**
+ * (Optional) An array of tools the model may call while generating a response.
+ */
+ tools?: Array<
+ | ResponseListResponse.OpenAIResponseInputToolWebSearch
+ | ResponseListResponse.OpenAIResponseInputToolFileSearch
+ | ResponseListResponse.OpenAIResponseInputToolFunction
+ | ResponseListResponse.OpenAIResponseToolMcp
+ >;
+
+ /**
+ * (Optional) Nucleus sampling parameter used for generation
+ */
+ top_p?: number;
+
+ /**
+ * (Optional) Truncation strategy applied to the response
+ */
+ truncation?: string;
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ usage?: ResponseListResponse.Usage;
+}
+
+export namespace ResponseListResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
- /**
- * Unix timestamp when the response was created
- */
- created_at: number;
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
- /**
- * List of input items that led to this response
- */
- input: Array<
- | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
- | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput
- | ResponseListResponse.OpenAIResponseMessage
- >;
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
- /**
- * Model identifier used for generation
- */
- model: string;
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
- /**
- * Object type identifier, always "response"
- */
- object: 'response';
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
- /**
- * List of generated output items (messages, tool calls, etc.)
- */
- output: Array<
- | ResponseListResponse.OpenAIResponseMessage
- | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageMcpCall
- | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools
- >;
+ end_index: number;
- /**
- * Whether tool calls can be executed in parallel
- */
- parallel_tool_calls: boolean;
+ file_id: string;
- /**
- * Current status of the response generation
- */
- status: string;
+ filename: string;
- /**
- * Text formatting configuration for the response
- */
- text: ResponseListResponse.Text;
+ start_index: number;
- /**
- * (Optional) Error details if the response generation failed
- */
- error?: ResponseListResponse.Error;
+ type: 'container_file_citation';
+ }
- /**
- * (Optional) ID of the previous response in a conversation
- */
- previous_response_id?: string;
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
- /**
- * (Optional) Sampling temperature used for generation
- */
- temperature?: number;
+ index: number;
- /**
- * (Optional) Nucleus sampling parameter used for generation
- */
- top_p?: number;
+ type: 'file_path';
+ }
+ }
- /**
- * (Optional) Truncation strategy applied to the response
- */
- truncation?: string;
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
- /**
- * (Optional) User identifier associated with the request
- */
- user?: string;
-}
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
-export namespace ResponseListResponse {
/**
* Web search tool call output message for OpenAI responses.
*/
@@ -2017,6 +3362,108 @@ export namespace ResponseListResponse {
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* This represents the output of a function call that gets passed back to the
* model.
@@ -2033,6 +3480,21 @@ export namespace ResponseListResponse {
status?: string;
}
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
/**
* Corresponds to the various Message types in the Responses API. They are all
* under one type because the Responses API gives them all the same "type" value,
@@ -2045,7 +3507,10 @@ export namespace ResponseListResponse {
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
>
- | Array;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -2092,12 +3557,12 @@ export namespace ResponseListResponse {
image_url?: string;
}
- export interface UnionMember2 {
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -2105,7 +3570,7 @@ export namespace ResponseListResponse {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -2183,6 +3648,21 @@ export namespace ResponseListResponse {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
/**
@@ -2197,7 +3677,10 @@ export namespace ResponseListResponse {
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
>
- | Array;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -2244,12 +3727,12 @@ export namespace ResponseListResponse {
image_url?: string;
}
- export interface UnionMember2 {
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -2257,7 +3740,7 @@ export namespace ResponseListResponse {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -2332,8 +3815,23 @@ export namespace ResponseListResponse {
index: number;
- type: 'file_path';
- }
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
}
}
@@ -2429,255 +3927,628 @@ export namespace ResponseListResponse {
arguments: string;
/**
- * Unique identifier for the function call
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * Text formatting configuration for the response
+ */
+ export interface Text {
+ /**
+ * (Optional) Text format configuration specifying output format requirements
+ */
+ format?: Text.Format;
+ }
+
+ export namespace Text {
+ /**
+ * (Optional) Text format configuration specifying output format requirements
+ */
+ export interface Format {
+ /**
+ * Must be "text", "json_schema", or "json_object" to identify the format type
+ */
+ type: 'text' | 'json_schema' | 'json_object';
+
+ /**
+ * (Optional) A description of the response format. Only used for json_schema.
+ */
+ description?: string;
+
+ /**
+ * The name of the response format. Only used for json_schema.
+ */
+ name?: string;
+
+ /**
+ * The JSON schema the response should conform to. In a Python SDK, this is often a
+ * `pydantic` model. Only used for json_schema.
+ */
+ schema?: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * (Optional) Whether to strictly enforce the JSON schema. If true, the response
+ * must match the schema exactly. Only used for json_schema.
+ */
+ strict?: boolean;
+ }
+ }
+
+ /**
+ * (Optional) Error details if the response generation failed
+ */
+ export interface Error {
+ /**
+ * Error code identifying the type of failure
+ */
+ code: string;
+
+ /**
+ * Human-readable error message describing the failure
+ */
+ message: string;
+ }
+
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolWebSearch {
+ /**
+ * Web search tool type variant to use
+ */
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+ /**
+ * (Optional) Size of search context, must be "low", "medium", or "high"
+ */
+ search_context_size?: string;
+ }
+
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFileSearch {
+ /**
+ * Tool type identifier, always "file_search"
+ */
+ type: 'file_search';
+
+ /**
+ * List of vector store identifiers to search within
+ */
+ vector_store_ids: Array;
+
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
+ max_num_results?: number;
+
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+ }
+
+ export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ export interface RankingOptions {
+ /**
+ * (Optional) Name of the ranking algorithm to use
+ */
+ ranker?: string;
+
+ /**
+ * (Optional) Minimum relevance score threshold for results
+ */
+ score_threshold?: number;
+ }
+ }
+
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
*/
- call_id: string;
+ name: string;
/**
- * Name of the function being called
+ * Tool type identifier, always "function"
*/
- name: string;
+ type: 'function';
/**
- * Tool call type identifier, always "function_call"
+ * (Optional) Description of what the function does
*/
- type: 'function_call';
+ description?: string;
/**
- * (Optional) Additional identifier for the tool call
+ * (Optional) JSON schema defining the function's parameters
*/
- id?: string;
+ parameters?: { [key: string]: boolean | number | string | Array | unknown | null };
/**
- * (Optional) Current status of the function call execution
+ * (Optional) Whether to enforce strict parameter validation
*/
- status?: string;
+ strict?: boolean;
}
/**
- * Model Context Protocol (MCP) call output message for OpenAI responses.
+ * Model Context Protocol (MCP) tool configuration for OpenAI response object.
*/
- export interface OpenAIResponseOutputMessageMcpCall {
- /**
- * Unique identifier for this MCP call
- */
- id: string;
-
- /**
- * JSON string containing the MCP call arguments
- */
- arguments: string;
-
- /**
- * Name of the MCP method being called
- */
- name: string;
-
+ export interface OpenAIResponseToolMcp {
/**
- * Label identifying the MCP server handling the call
+ * Label to identify this MCP server
*/
server_label: string;
/**
- * Tool call type identifier, always "mcp_call"
+ * Tool type identifier, always "mcp"
*/
- type: 'mcp_call';
+ type: 'mcp';
/**
- * (Optional) Error message if the MCP call failed
+ * (Optional) Restriction on which tools can be used from this server
*/
- error?: string;
+ allowed_tools?: Array | OpenAIResponseToolMcp.AllowedToolsFilter;
+ }
+ export namespace OpenAIResponseToolMcp {
/**
- * (Optional) Output result from the successful MCP call
+ * Filter configuration for restricting which MCP tools can be used.
*/
- output?: string;
+ export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
+ tool_names?: Array;
+ }
}
/**
- * MCP list tools output message containing available tools from an MCP server.
+ * (Optional) Token usage information for the response
*/
- export interface OpenAIResponseOutputMessageMcpListTools {
+ export interface Usage {
/**
- * Unique identifier for this MCP list tools operation
+ * Number of tokens in the input
*/
- id: string;
+ input_tokens: number;
/**
- * Label identifying the MCP server providing the tools
+ * Number of tokens in the output
*/
- server_label: string;
+ output_tokens: number;
/**
- * List of available tools provided by the MCP server
+ * Total tokens used (input + output)
*/
- tools: Array;
+ total_tokens: number;
/**
- * Tool call type identifier, always "mcp_list_tools"
+ * Detailed breakdown of input token usage
*/
- type: 'mcp_list_tools';
- }
+ input_tokens_details?: Usage.InputTokensDetails;
- export namespace OpenAIResponseOutputMessageMcpListTools {
/**
- * Tool definition returned by MCP list tools operation.
+ * Detailed breakdown of output token usage
*/
- export interface Tool {
- /**
- * JSON schema defining the tool's input parameters
- */
- input_schema: { [key: string]: boolean | number | string | Array | unknown | null };
+ output_tokens_details?: Usage.OutputTokensDetails;
+ }
+ export namespace Usage {
+ /**
+ * Detailed breakdown of input token usage
+ */
+ export interface InputTokensDetails {
/**
- * Name of the tool
+ * Number of tokens retrieved from cache
*/
- name: string;
+ cached_tokens?: number;
+ }
+ /**
+ * Detailed breakdown of output token usage
+ */
+ export interface OutputTokensDetails {
/**
- * (Optional) Description of what the tool does
+ * Number of tokens used for reasoning (o1/o3 models)
*/
- description?: string;
+ reasoning_tokens?: number;
}
}
+}
+/**
+ * Response object confirming deletion of an OpenAI response.
+ */
+export interface ResponseDeleteResponse {
/**
- * Text formatting configuration for the response
+ * Unique identifier of the deleted response
*/
- export interface Text {
+ id: string;
+
+ /**
+ * Deletion confirmation flag, always True
+ */
+ deleted: boolean;
+
+ /**
+ * Object type identifier, always "response"
+ */
+ object: 'response';
+}
+
+export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming;
+
+export interface ResponseCreateParamsBase {
+ /**
+ * Input message(s) to create the response.
+ */
+ input:
+ | string
+ | Array<
+ | ResponseCreateParams.OpenAIResponseMessage
+ | ResponseCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
+ | ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
+ | ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall
+ | ResponseCreateParams.OpenAIResponseOutputMessageMcpCall
+ | ResponseCreateParams.OpenAIResponseOutputMessageMcpListTools
+ | ResponseCreateParams.OpenAIResponseMcpApprovalRequest
+ | ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput
+ | ResponseCreateParams.OpenAIResponseMcpApprovalResponse
+ | ResponseCreateParams.OpenAIResponseMessage
+ >;
+
+ /**
+ * The underlying LLM used for completions.
+ */
+ model: string;
+
+ /**
+ * (Optional) The ID of a conversation to add the response to. Must begin with
+ * 'conv\_'. Input and output messages will be automatically added to the
+ * conversation.
+ */
+ conversation?: string;
+
+ /**
+ * (Optional) Additional fields to include in the response.
+ */
+ include?: Array;
+
+ instructions?: string;
+
+ max_infer_iters?: number;
+
+ /**
+ * (Optional) if specified, the new response will be a continuation of the previous
+ * response. This can be used to easily fork-off new responses from existing
+ * responses.
+ */
+ previous_response_id?: string;
+
+ store?: boolean;
+
+ stream?: boolean;
+
+ temperature?: number;
+
+ /**
+ * Text response configuration for OpenAI responses.
+ */
+ text?: ResponseCreateParams.Text;
+
+ tools?: Array<
+ | ResponseCreateParams.OpenAIResponseInputToolWebSearch
+ | ResponseCreateParams.OpenAIResponseInputToolFileSearch
+ | ResponseCreateParams.OpenAIResponseInputToolFunction
+ | ResponseCreateParams.OpenAIResponseInputToolMcp
+ >;
+}
+
+export namespace ResponseCreateParams {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
/**
- * (Optional) Text format configuration specifying output format requirements
+ * Text content for input messages in OpenAI response format.
*/
- format?: Text.Format;
- }
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
- export namespace Text {
/**
- * (Optional) Text format configuration specifying output format requirements
+ * Image content for input messages in OpenAI response format.
*/
- export interface Format {
+ export interface OpenAIResponseInputMessageContentImage {
/**
- * Must be "text", "json_schema", or "json_object" to identify the format type
+ * Level of detail for image processing, can be "low", "high", or "auto"
*/
- type: 'text' | 'json_schema' | 'json_object';
+ detail: 'low' | 'high' | 'auto';
/**
- * (Optional) A description of the response format. Only used for json_schema.
+ * Content type identifier, always "input_image"
*/
- description?: string;
+ type: 'input_image';
/**
- * The name of the response format. Only used for json_schema.
+ * (Optional) URL of the image content
*/
- name?: string;
+ image_url?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
- * The JSON schema the response should conform to. In a Python SDK, this is often a
- * `pydantic` model. Only used for json_schema.
+ * File citation annotation for referencing specific files in response content.
*/
- schema?: { [key: string]: boolean | number | string | Array | unknown | null };
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
/**
- * (Optional) Whether to strictly enforce the JSON schema. If true, the response
- * must match the schema exactly. Only used for json_schema.
+ * URL citation annotation for referencing external web resources.
*/
- strict?: boolean;
- }
- }
-
- /**
- * (Optional) Error details if the response generation failed
- */
- export interface Error {
- /**
- * Error code identifying the type of failure
- */
- code: string;
-
- /**
- * Human-readable error message describing the failure
- */
- message: string;
- }
-}
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
-/**
- * Response object confirming deletion of an OpenAI response.
- */
-export interface ResponseDeleteResponse {
- /**
- * Unique identifier of the deleted response
- */
- id: string;
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
- /**
- * Deletion confirmation flag, always True
- */
- deleted: boolean;
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
- /**
- * Object type identifier, always "response"
- */
- object: 'response';
-}
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
-export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming;
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
-export interface ResponseCreateParamsBase {
- /**
- * Input message(s) to create the response.
- */
- input:
- | string
- | Array<
- | ResponseCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
- | ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
- | ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall
- | ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput
- | ResponseCreateParams.OpenAIResponseMessage
- >;
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
- /**
- * The underlying LLM used for completions.
- */
- model: string;
+ end_index: number;
- /**
- * (Optional) Additional fields to include in the response.
- */
- include?: Array;
+ file_id: string;
- instructions?: string;
+ filename: string;
- max_infer_iters?: number;
+ start_index: number;
- /**
- * (Optional) if specified, the new response will be a continuation of the previous
- * response. This can be used to easily fork-off new responses from existing
- * responses.
- */
- previous_response_id?: string;
+ type: 'container_file_citation';
+ }
- store?: boolean;
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
- stream?: boolean;
+ index: number;
- temperature?: number;
+ type: 'file_path';
+ }
+ }
- /**
- * Text response configuration for OpenAI responses.
- */
- text?: ResponseCreateParams.Text;
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
- tools?: Array<
- | ResponseCreateParams.OpenAIResponseInputToolWebSearch
- | ResponseCreateParams.OpenAIResponseInputToolFileSearch
- | ResponseCreateParams.OpenAIResponseInputToolFunction
- | ResponseCreateParams.OpenAIResponseInputToolMcp
- >;
-}
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
-export namespace ResponseCreateParams {
/**
* Web search tool call output message for OpenAI responses.
*/
@@ -2795,6 +4666,108 @@ export namespace ResponseCreateParams {
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* This represents the output of a function call that gets passed back to the
* model.
@@ -2811,6 +4784,21 @@ export namespace ResponseCreateParams {
status?: string;
}
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
/**
* Corresponds to the various Message types in the Responses API. They are all
* under one type because the Responses API gives them all the same "type" value,
@@ -2823,7 +4811,10 @@ export namespace ResponseCreateParams {
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
>
- | Array;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -2870,12 +4861,12 @@ export namespace ResponseCreateParams {
image_url?: string;
}
- export interface UnionMember2 {
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -2883,7 +4874,7 @@ export namespace ResponseCreateParams {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -2961,6 +4952,21 @@ export namespace ResponseCreateParams {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
/**
diff --git a/src/resources/routes.ts b/src/resources/routes.ts
index e297e1e..f974d6b 100644
--- a/src/resources/routes.ts
+++ b/src/resources/routes.ts
@@ -12,7 +12,8 @@ import * as InspectAPI from './inspect';
export class Routes extends APIResource {
/**
- * List all available API routes with their methods and implementing providers.
+ * List routes. List all available API routes with their methods and implementing
+ * providers.
*/
list(options?: Core.RequestOptions): Core.APIPromise {
return (
diff --git a/src/resources/safety.ts b/src/resources/safety.ts
index 5c8fdc6..baa5046 100644
--- a/src/resources/safety.ts
+++ b/src/resources/safety.ts
@@ -12,7 +12,7 @@ import * as Shared from './shared';
export class Safety extends APIResource {
/**
- * Run a shield.
+ * Run shield. Run a shield.
*/
runShield(body: SafetyRunShieldParams, options?: Core.RequestOptions): Core.APIPromise {
return this._client.post('/v1/safety/run-shield', { body, ...options });
@@ -33,7 +33,13 @@ export interface SafetyRunShieldParams {
/**
* The messages to run the shield on.
*/
- messages: Array;
+ messages: Array<
+ | SafetyRunShieldParams.OpenAIUserMessageParam
+ | SafetyRunShieldParams.OpenAISystemMessageParam
+ | SafetyRunShieldParams.OpenAIAssistantMessageParam
+ | SafetyRunShieldParams.OpenAIToolMessageParam
+ | SafetyRunShieldParams.OpenAIDeveloperMessageParam
+ >;
/**
* The parameters of the shield.
@@ -46,6 +52,298 @@ export interface SafetyRunShieldParams {
shield_id: string;
}
+export namespace SafetyRunShieldParams {
+ /**
+ * A message from the user in an OpenAI-compatible chat completion request.
+ */
+ export interface OpenAIUserMessageParam {
+ /**
+ * The content of the message, which can include text and other media
+ */
+ content:
+ | string
+ | Array<
+ | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
+ | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+ | OpenAIUserMessageParam.OpenAIFile
+ >;
+
+ /**
+ * Must be "user" to identify this as a user message
+ */
+ role: 'user';
+
+ /**
+ * (Optional) The name of the user message participant.
+ */
+ name?: string;
+ }
+
+ export namespace OpenAIUserMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartTextParam {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Image content part for OpenAI-compatible chat completion messages.
+ */
+ export interface OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+ /**
+ * Must be "image_url" to identify this as image content
+ */
+ type: 'image_url';
+ }
+
+ export namespace OpenAIChatCompletionContentPartImageParam {
+ /**
+ * Image URL specification and processing details
+ */
+ export interface ImageURL {
+ /**
+ * URL of the image to include in the message
+ */
+ url: string;
+
+ /**
+ * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+ */
+ detail?: string;
+ }
+ }
+
+ export interface OpenAIFile {
+ file: OpenAIFile.File;
+
+ type: 'file';
+ }
+
+ export namespace OpenAIFile {
+ export interface File {
+ file_data?: string;
+
+ file_id?: string;
+
+ filename?: string;
+ }
+ }
+ }
+
+ /**
+ * A system message providing instructions or context to the model.
+ */
+ export interface OpenAISystemMessageParam {
+ /**
+ * The content of the "system prompt". If multiple system messages are provided,
+ * they are concatenated. The underlying Llama Stack code may also add other system
+ * messages (for example, for formatting tool definitions).
+ */
+ content: string | Array;
+
+ /**
+ * Must be "system" to identify this as a system message
+ */
+ role: 'system';
+
+ /**
+ * (Optional) The name of the system message participant.
+ */
+ name?: string;
+ }
+
+ export namespace OpenAISystemMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+ }
+
+ /**
+ * A message containing the model's (assistant) response in an OpenAI-compatible
+ * chat completion request.
+ */
+ export interface OpenAIAssistantMessageParam {
+ /**
+ * Must be "assistant" to identify this as the model's response
+ */
+ role: 'assistant';
+
+ /**
+ * The content of the model's response
+ */
+ content?: string | Array;
+
+ /**
+ * (Optional) The name of the assistant message participant.
+ */
+ name?: string;
+
+ /**
+ * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.
+ */
+ tool_calls?: Array;
+ }
+
+ export namespace OpenAIAssistantMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+
+ /**
+ * Tool call specification for OpenAI-compatible chat completion responses.
+ */
+ export interface ToolCall {
+ /**
+ * Must be "function" to identify this as a function call
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Unique identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Function call details
+ */
+ function?: ToolCall.Function;
+
+ /**
+ * (Optional) Index of the tool call in the list
+ */
+ index?: number;
+ }
+
+ export namespace ToolCall {
+ /**
+ * (Optional) Function call details
+ */
+ export interface Function {
+ /**
+ * (Optional) Arguments to pass to the function as a JSON string
+ */
+ arguments?: string;
+
+ /**
+ * (Optional) Name of the function to call
+ */
+ name?: string;
+ }
+ }
+ }
+
+ /**
+ * A message representing the result of a tool invocation in an OpenAI-compatible
+ * chat completion request.
+ */
+ export interface OpenAIToolMessageParam {
+ /**
+ * The response content from the tool
+ */
+ content: string | Array;
+
+ /**
+ * Must be "tool" to identify this as a tool response
+ */
+ role: 'tool';
+
+ /**
+ * Unique identifier for the tool call this response is for
+ */
+ tool_call_id: string;
+ }
+
+ export namespace OpenAIToolMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+ }
+
+ /**
+ * A message from the developer in an OpenAI-compatible chat completion request.
+ */
+ export interface OpenAIDeveloperMessageParam {
+ /**
+ * The content of the developer message
+ */
+ content: string | Array;
+
+ /**
+ * Must be "developer" to identify this as a developer message
+ */
+ role: 'developer';
+
+ /**
+ * (Optional) The name of the developer message participant.
+ */
+ name?: string;
+ }
+
+ export namespace OpenAIDeveloperMessageParam {
+ /**
+ * Text content part for OpenAI-compatible chat completion messages.
+ */
+ export interface UnionMember1 {
+ /**
+ * The text content of the message
+ */
+ text: string;
+
+ /**
+ * Must be "text" to identify this as text content
+ */
+ type: 'text';
+ }
+ }
+}
+
export declare namespace Safety {
export { type RunShieldResponse as RunShieldResponse, type SafetyRunShieldParams as SafetyRunShieldParams };
}
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 76155d9..5ed757c 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -6,8 +6,6 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import * as Shared from './shared';
-import * as InferenceAPI from './inference';
import * as ToolRuntimeAPI from './tool-runtime/tool-runtime';
/**
@@ -111,36 +109,6 @@ export namespace AgentConfig {
}
}
-/**
- * Response from a batch completion request.
- */
-export interface BatchCompletion {
- /**
- * List of completion responses, one for each input in the batch
- */
- batch: Array;
-}
-
-/**
- * Response from a chat completion request.
- */
-export interface ChatCompletionResponse {
- /**
- * The complete response message
- */
- completion_message: CompletionMessage;
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array;
-}
-
/**
* A message containing the model's (assistant) response in a chat conversation.
*/
@@ -171,63 +139,6 @@ export interface CompletionMessage {
tool_calls?: Array;
}
-/**
- * A text content delta for streaming responses.
- */
-export type ContentDelta = ContentDelta.TextDelta | ContentDelta.ImageDelta | ContentDelta.ToolCallDelta;
-
-export namespace ContentDelta {
- /**
- * A text content delta for streaming responses.
- */
- export interface TextDelta {
- /**
- * The incremental text content
- */
- text: string;
-
- /**
- * Discriminator type of the delta. Always "text"
- */
- type: 'text';
- }
-
- /**
- * An image content delta for streaming responses.
- */
- export interface ImageDelta {
- /**
- * The incremental image data as bytes
- */
- image: string;
-
- /**
- * Discriminator type of the delta. Always "image"
- */
- type: 'image';
- }
-
- /**
- * A tool call content delta for streaming responses.
- */
- export interface ToolCallDelta {
- /**
- * Current parsing status of the tool call
- */
- parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded';
-
- /**
- * Either an in-progress tool call string or the final parsed tool call
- */
- tool_call: string | Shared.ToolCall;
-
- /**
- * Discriminator type of the delta. Always "tool_call"
- */
- type: 'tool_call';
- }
-}
-
/**
* A document to be used for document ingestion in the RAG Tool.
*/
@@ -478,26 +389,6 @@ export namespace InterleavedContentItem {
*/
export type Message = UserMessage | SystemMessage | ToolResponseMessage | CompletionMessage;
-/**
- * A metric value included in API responses.
- */
-export interface Metric {
- /**
- * The name of the metric
- */
- metric: string;
-
- /**
- * The numeric value of the metric
- */
- value: number;
-
- /**
- * (Optional) The unit of measurement for the metric value
- */
- unit?: string;
-}
-
/**
* Parameter type for string values.
*/
@@ -640,7 +531,7 @@ export interface QueryConfig {
/**
* Configuration for the query generator.
*/
- query_generator_config: QueryGeneratorConfig;
+ query_generator_config: QueryConfig.DefaultRagQueryGeneratorConfig | QueryConfig.LlmragQueryGeneratorConfig;
/**
* Search mode for retrieval—either "vector", "keyword", or "hybrid". Default
@@ -656,79 +547,70 @@ export interface QueryConfig {
export namespace QueryConfig {
/**
- * Reciprocal Rank Fusion (RRF) ranker configuration.
+ * Configuration for the default RAG query generator.
*/
- export interface RrfRanker {
+ export interface DefaultRagQueryGeneratorConfig {
/**
- * The impact factor for RRF scoring. Higher values give more weight to
- * higher-ranked results. Must be greater than 0
+ * String separator used to join query terms
*/
- impact_factor: number;
+ separator: string;
/**
- * The type of ranker, always "rrf"
+ * Type of query generator, always 'default'
*/
- type: 'rrf';
+ type: 'default';
}
/**
- * Weighted ranker configuration that combines vector and keyword scores.
+ * Configuration for the LLM-based RAG query generator.
*/
- export interface WeightedRanker {
+ export interface LlmragQueryGeneratorConfig {
/**
- * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use
- * vector scores, values in between blend both scores.
+ * Name of the language model to use for query generation
*/
- alpha: number;
+ model: string;
/**
- * The type of ranker, always "weighted"
+ * Template string for formatting the query generation prompt
*/
- type: 'weighted';
- }
-}
+ template: string;
-/**
- * Configuration for the default RAG query generator.
- */
-export type QueryGeneratorConfig =
- | QueryGeneratorConfig.DefaultRagQueryGeneratorConfig
- | QueryGeneratorConfig.LlmragQueryGeneratorConfig;
+ /**
+ * Type of query generator, always 'llm'
+ */
+ type: 'llm';
+ }
-export namespace QueryGeneratorConfig {
/**
- * Configuration for the default RAG query generator.
+ * Reciprocal Rank Fusion (RRF) ranker configuration.
*/
- export interface DefaultRagQueryGeneratorConfig {
+ export interface RrfRanker {
/**
- * String separator used to join query terms
+ * The impact factor for RRF scoring. Higher values give more weight to
+ * higher-ranked results. Must be greater than 0
*/
- separator: string;
+ impact_factor: number;
/**
- * Type of query generator, always 'default'
+ * The type of ranker, always "rrf"
*/
- type: 'default';
+ type: 'rrf';
}
/**
- * Configuration for the LLM-based RAG query generator.
+ * Weighted ranker configuration that combines vector and keyword scores.
*/
- export interface LlmragQueryGeneratorConfig {
- /**
- * Name of the language model to use for query generation
- */
- model: string;
-
+ export interface WeightedRanker {
/**
- * Template string for formatting the query generation prompt
+ * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use
+ * vector scores, values in between blend both scores.
*/
- template: string;
+ alpha: number;
/**
- * Type of query generator, always 'llm'
+ * The type of ranker, always "weighted"
*/
- type: 'llm';
+ type: 'weighted';
}
}
@@ -920,33 +802,11 @@ export interface SystemMessage {
}
export interface ToolCall {
- arguments:
- | string
- | {
- [key: string]:
- | string
- | number
- | boolean
- | Array
- | { [key: string]: string | number | boolean | null }
- | null;
- };
+ arguments: string;
call_id: string;
tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- arguments_json?: string;
-}
-
-export interface ToolParamDefinition {
- param_type: string;
-
- default?: boolean | number | string | Array | unknown | null;
-
- description?: string;
-
- required?: boolean;
}
/**
diff --git a/src/resources/telemetry.ts b/src/resources/telemetry.ts
deleted file mode 100644
index d7c3d69..0000000
--- a/src/resources/telemetry.ts
+++ /dev/null
@@ -1,692 +0,0 @@
-// Copyright (c) Meta Platforms, Inc. and affiliates.
-// All rights reserved.
-//
-// This source code is licensed under the terms described in the LICENSE file in
-// the root directory of this source tree.
-
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import * as Core from '../core';
-
-export class Telemetry extends APIResource {
- /**
- * Get a span by its ID.
- */
- getSpan(
- traceId: string,
- spanId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.get(`/v1/telemetry/traces/${traceId}/spans/${spanId}`, options);
- }
-
- /**
- * Get a span tree by its ID.
- */
- getSpanTree(
- spanId: string,
- body: TelemetryGetSpanTreeParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return (
- this._client.post(`/v1/telemetry/spans/${spanId}/tree`, { body, ...options }) as Core.APIPromise<{
- data: TelemetryGetSpanTreeResponse;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Get a trace by its ID.
- */
- getTrace(traceId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/telemetry/traces/${traceId}`, options);
- }
-
- /**
- * Log an event.
- */
- logEvent(body: TelemetryLogEventParams, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.post('/v1/telemetry/events', {
- body,
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-
- /**
- * Query metrics.
- */
- queryMetrics(
- metricName: string,
- body: TelemetryQueryMetricsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return (
- this._client.post(`/v1/telemetry/metrics/${metricName}`, { body, ...options }) as Core.APIPromise<{
- data: TelemetryQueryMetricsResponse;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Query spans.
- */
- querySpans(
- body: TelemetryQuerySpansParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return (
- this._client.post('/v1/telemetry/spans', { body, ...options }) as Core.APIPromise<{
- data: TelemetryQuerySpansResponse;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Query traces.
- */
- queryTraces(
- body: TelemetryQueryTracesParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return (
- this._client.post('/v1/telemetry/traces', { body, ...options }) as Core.APIPromise<{
- data: TelemetryQueryTracesResponse;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Save spans to a dataset.
- */
- saveSpansToDataset(
- body: TelemetrySaveSpansToDatasetParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post('/v1/telemetry/spans/export', {
- body,
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-}
-
-/**
- * An unstructured log event containing a simple text message.
- */
-export type Event = Event.UnstructuredLogEvent | Event.MetricEvent | Event.StructuredLogEvent;
-
-export namespace Event {
- /**
- * An unstructured log event containing a simple text message.
- */
- export interface UnstructuredLogEvent {
- /**
- * The log message text
- */
- message: string;
-
- /**
- * The severity level of the log message
- */
- severity: 'verbose' | 'debug' | 'info' | 'warn' | 'error' | 'critical';
-
- /**
- * Unique identifier for the span this event belongs to
- */
- span_id: string;
-
- /**
- * Timestamp when the event occurred
- */
- timestamp: string;
-
- /**
- * Unique identifier for the trace this event belongs to
- */
- trace_id: string;
-
- /**
- * Event type identifier set to UNSTRUCTURED_LOG
- */
- type: 'unstructured_log';
-
- /**
- * (Optional) Key-value pairs containing additional metadata about the event
- */
- attributes?: { [key: string]: string | number | boolean | null };
- }
-
- /**
- * A metric event containing a measured value.
- */
- export interface MetricEvent {
- /**
- * The name of the metric being measured
- */
- metric: string;
-
- /**
- * Unique identifier for the span this event belongs to
- */
- span_id: string;
-
- /**
- * Timestamp when the event occurred
- */
- timestamp: string;
-
- /**
- * Unique identifier for the trace this event belongs to
- */
- trace_id: string;
-
- /**
- * Event type identifier set to METRIC
- */
- type: 'metric';
-
- /**
- * The unit of measurement for the metric value
- */
- unit: string;
-
- /**
- * The numeric value of the metric measurement
- */
- value: number;
-
- /**
- * (Optional) Key-value pairs containing additional metadata about the event
- */
- attributes?: { [key: string]: string | number | boolean | null };
- }
-
- /**
- * A structured log event containing typed payload data.
- */
- export interface StructuredLogEvent {
- /**
- * The structured payload data for the log event
- */
- payload: StructuredLogEvent.SpanStartPayload | StructuredLogEvent.SpanEndPayload;
-
- /**
- * Unique identifier for the span this event belongs to
- */
- span_id: string;
-
- /**
- * Timestamp when the event occurred
- */
- timestamp: string;
-
- /**
- * Unique identifier for the trace this event belongs to
- */
- trace_id: string;
-
- /**
- * Event type identifier set to STRUCTURED_LOG
- */
- type: 'structured_log';
-
- /**
- * (Optional) Key-value pairs containing additional metadata about the event
- */
- attributes?: { [key: string]: string | number | boolean | null };
- }
-
- export namespace StructuredLogEvent {
- /**
- * Payload for a span start event.
- */
- export interface SpanStartPayload {
- /**
- * Human-readable name describing the operation this span represents
- */
- name: string;
-
- /**
- * Payload type identifier set to SPAN_START
- */
- type: 'span_start';
-
- /**
- * (Optional) Unique identifier for the parent span, if this is a child span
- */
- parent_span_id?: string;
- }
-
- /**
- * Payload for a span end event.
- */
- export interface SpanEndPayload {
- /**
- * The final status of the span indicating success or failure
- */
- status: 'ok' | 'error';
-
- /**
- * Payload type identifier set to SPAN_END
- */
- type: 'span_end';
- }
- }
-}
-
-/**
- * A condition for filtering query results.
- */
-export interface QueryCondition {
- /**
- * The attribute key to filter on
- */
- key: string;
-
- /**
- * The comparison operator to apply
- */
- op: 'eq' | 'ne' | 'gt' | 'lt';
-
- /**
- * The value to compare against
- */
- value: boolean | number | string | Array | unknown | null;
-}
-
-/**
- * Response containing a list of spans.
- */
-export interface QuerySpansResponse {
- /**
- * List of spans matching the query criteria
- */
- data: TelemetryQuerySpansResponse;
-}
-
-/**
- * A span that includes status information.
- */
-export interface SpanWithStatus {
- /**
- * Human-readable name describing the operation this span represents
- */
- name: string;
-
- /**
- * Unique identifier for the span
- */
- span_id: string;
-
- /**
- * Timestamp when the operation began
- */
- start_time: string;
-
- /**
- * Unique identifier for the trace this span belongs to
- */
- trace_id: string;
-
- /**
- * (Optional) Key-value pairs containing additional metadata about the span
- */
- attributes?: { [key: string]: boolean | number | string | Array | unknown | null };
-
- /**
- * (Optional) Timestamp when the operation finished, if completed
- */
- end_time?: string;
-
- /**
- * (Optional) Unique identifier for the parent span, if this is a child span
- */
- parent_span_id?: string;
-
- /**
- * (Optional) The current status of the span
- */
- status?: 'ok' | 'error';
-}
-
-/**
- * A trace representing the complete execution path of a request across multiple
- * operations.
- */
-export interface Trace {
- /**
- * Unique identifier for the root span that started this trace
- */
- root_span_id: string;
-
- /**
- * Timestamp when the trace began
- */
- start_time: string;
-
- /**
- * Unique identifier for the trace
- */
- trace_id: string;
-
- /**
- * (Optional) Timestamp when the trace finished, if completed
- */
- end_time?: string;
-}
-
-/**
- * A span representing a single operation within a trace.
- */
-export interface TelemetryGetSpanResponse {
- /**
- * Human-readable name describing the operation this span represents
- */
- name: string;
-
- /**
- * Unique identifier for the span
- */
- span_id: string;
-
- /**
- * Timestamp when the operation began
- */
- start_time: string;
-
- /**
- * Unique identifier for the trace this span belongs to
- */
- trace_id: string;
-
- /**
- * (Optional) Key-value pairs containing additional metadata about the span
- */
- attributes?: { [key: string]: boolean | number | string | Array | unknown | null };
-
- /**
- * (Optional) Timestamp when the operation finished, if completed
- */
- end_time?: string;
-
- /**
- * (Optional) Unique identifier for the parent span, if this is a child span
- */
- parent_span_id?: string;
-}
-
-/**
- * Dictionary mapping span IDs to spans with status information
- */
-export type TelemetryGetSpanTreeResponse = { [key: string]: SpanWithStatus };
-
-/**
- * List of metric series matching the query criteria
- */
-export type TelemetryQueryMetricsResponse =
- Array;
-
-export namespace TelemetryQueryMetricsResponse {
- /**
- * A time series of metric data points.
- */
- export interface TelemetryQueryMetricsResponseItem {
- /**
- * List of labels associated with this metric series
- */
- labels: Array;
-
- /**
- * The name of the metric
- */
- metric: string;
-
- /**
- * List of data points in chronological order
- */
- values: Array;
- }
-
- export namespace TelemetryQueryMetricsResponseItem {
- /**
- * A label associated with a metric.
- */
- export interface Label {
- /**
- * The name of the label
- */
- name: string;
-
- /**
- * The value of the label
- */
- value: string;
- }
-
- /**
- * A single data point in a metric time series.
- */
- export interface Value {
- /**
- * Unix timestamp when the metric value was recorded
- */
- timestamp: number;
-
- unit: string;
-
- /**
- * The numeric value of the metric at this timestamp
- */
- value: number;
- }
- }
-}
-
-/**
- * List of spans matching the query criteria
- */
-export type TelemetryQuerySpansResponse = Array;
-
-export namespace TelemetryQuerySpansResponse {
- /**
- * A span representing a single operation within a trace.
- */
- export interface TelemetryQuerySpansResponseItem {
- /**
- * Human-readable name describing the operation this span represents
- */
- name: string;
-
- /**
- * Unique identifier for the span
- */
- span_id: string;
-
- /**
- * Timestamp when the operation began
- */
- start_time: string;
-
- /**
- * Unique identifier for the trace this span belongs to
- */
- trace_id: string;
-
- /**
- * (Optional) Key-value pairs containing additional metadata about the span
- */
- attributes?: { [key: string]: boolean | number | string | Array | unknown | null };
-
- /**
- * (Optional) Timestamp when the operation finished, if completed
- */
- end_time?: string;
-
- /**
- * (Optional) Unique identifier for the parent span, if this is a child span
- */
- parent_span_id?: string;
- }
-}
-
-/**
- * List of traces matching the query criteria
- */
-export type TelemetryQueryTracesResponse = Array;
-
-export interface TelemetryGetSpanTreeParams {
- /**
- * The attributes to return in the tree.
- */
- attributes_to_return?: Array;
-
- /**
- * The maximum depth of the tree.
- */
- max_depth?: number;
-}
-
-export interface TelemetryLogEventParams {
- /**
- * The event to log.
- */
- event: Event;
-
- /**
- * The time to live of the event.
- */
- ttl_seconds: number;
-}
-
-export interface TelemetryQueryMetricsParams {
- /**
- * The type of query to perform.
- */
- query_type: 'range' | 'instant';
-
- /**
- * The start time of the metric to query.
- */
- start_time: number;
-
- /**
- * The end time of the metric to query.
- */
- end_time?: number;
-
- /**
- * The granularity of the metric to query.
- */
- granularity?: string;
-
- /**
- * The label matchers to apply to the metric.
- */
- label_matchers?: Array;
-}
-
-export namespace TelemetryQueryMetricsParams {
- /**
- * A matcher for filtering metrics by label values.
- */
- export interface LabelMatcher {
- /**
- * The name of the label to match
- */
- name: string;
-
- /**
- * The comparison operator to use for matching
- */
- operator: '=' | '!=' | '=~' | '!~';
-
- /**
- * The value to match against
- */
- value: string;
- }
-}
-
-export interface TelemetryQuerySpansParams {
- /**
- * The attribute filters to apply to the spans.
- */
- attribute_filters: Array;
-
- /**
- * The attributes to return in the spans.
- */
- attributes_to_return: Array;
-
- /**
- * The maximum depth of the tree.
- */
- max_depth?: number;
-}
-
-export interface TelemetryQueryTracesParams {
- /**
- * The attribute filters to apply to the traces.
- */
- attribute_filters?: Array;
-
- /**
- * The limit of traces to return.
- */
- limit?: number;
-
- /**
- * The offset of the traces to return.
- */
- offset?: number;
-
- /**
- * The order by of the traces to return.
- */
- order_by?: Array;
-}
-
-export interface TelemetrySaveSpansToDatasetParams {
- /**
- * The attribute filters to apply to the spans.
- */
- attribute_filters: Array;
-
- /**
- * The attributes to save to the dataset.
- */
- attributes_to_save: Array;
-
- /**
- * The ID of the dataset to save the spans to.
- */
- dataset_id: string;
-
- /**
- * The maximum depth of the tree.
- */
- max_depth?: number;
-}
-
-export declare namespace Telemetry {
- export {
- type Event as Event,
- type QueryCondition as QueryCondition,
- type QuerySpansResponse as QuerySpansResponse,
- type SpanWithStatus as SpanWithStatus,
- type Trace as Trace,
- type TelemetryGetSpanResponse as TelemetryGetSpanResponse,
- type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse,
- type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse,
- type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse,
- type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse,
- type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams as TelemetryLogEventParams,
- type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams,
- type TelemetryQuerySpansParams as TelemetryQuerySpansParams,
- type TelemetryQueryTracesParams as TelemetryQueryTracesParams,
- type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams,
- };
-}
diff --git a/src/resources/tool-runtime/rag-tool.ts b/src/resources/tool-runtime/rag-tool.ts
index eafcecb..0df1b75 100644
--- a/src/resources/tool-runtime/rag-tool.ts
+++ b/src/resources/tool-runtime/rag-tool.ts
@@ -44,7 +44,7 @@ export interface RagToolInsertParams {
/**
* ID of the vector database to store the document embeddings
*/
- vector_db_id: string;
+ vector_store_id: string;
}
export interface RagToolQueryParams {
@@ -56,7 +56,7 @@ export interface RagToolQueryParams {
/**
* List of vector database IDs to search within
*/
- vector_db_ids: Array;
+ vector_store_ids: Array<string>;
/**
* (Optional) Configuration parameters for the query operation
diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts
index 1053f89..073a48d 100644
--- a/src/resources/tool-runtime/tool-runtime.ts
+++ b/src/resources/tool-runtime/tool-runtime.ts
@@ -63,47 +63,25 @@ export interface ToolDef {
*/
description?: string;
+ /**
+ * (Optional) JSON Schema for tool inputs (MCP inputSchema)
+ */
+ input_schema?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
/**
* (Optional) Additional metadata about the tool
*/
metadata?: { [key: string]: boolean | number | string | Array | unknown | null };
/**
- * (Optional) List of parameters this tool accepts
+ * (Optional) JSON Schema for tool outputs (MCP outputSchema)
*/
- parameters?: Array;
-}
+ output_schema?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-export namespace ToolDef {
/**
- * Parameter definition for a tool.
+ * (Optional) ID of the tool group this tool belongs to
*/
- export interface Parameter {
- /**
- * Human-readable description of what the parameter does
- */
- description: string;
-
- /**
- * Name of the parameter
- */
- name: string;
-
- /**
- * Type of the parameter (e.g., string, integer)
- */
- parameter_type: string;
-
- /**
- * Whether this parameter is required for tool invocation
- */
- required: boolean;
-
- /**
- * (Optional) Default value for the parameter if not provided
- */
- default?: boolean | number | string | Array | unknown | null;
- }
+ toolgroup_id?: string;
}
/**
diff --git a/src/resources/tools.ts b/src/resources/tools.ts
index 612a78b..93f95b7 100644
--- a/src/resources/tools.ts
+++ b/src/resources/tools.ts
@@ -9,6 +9,7 @@
import { APIResource } from '../resource';
import { isRequestOptions } from '../core';
import * as Core from '../core';
+import * as ToolRuntimeAPI from './tool-runtime/tool-runtime';
export class Tools extends APIResource {
/**
@@ -31,93 +32,15 @@ export class Tools extends APIResource {
/**
* Get a tool by its name.
*/
- get(toolName: string, options?: Core.RequestOptions): Core.APIPromise {
+ get(toolName: string, options?: Core.RequestOptions): Core.APIPromise<ToolRuntimeAPI.ToolDef> {
return this._client.get(`/v1/tools/${toolName}`, options);
}
}
/**
- * Response containing a list of tools.
+ * List of tool definitions
*/
-export interface ListToolsResponse {
- /**
- * List of tools
- */
- data: ToolListResponse;
-}
-
-/**
- * A tool that can be invoked by agents.
- */
-export interface Tool {
- /**
- * Human-readable description of what the tool does
- */
- description: string;
-
- identifier: string;
-
- /**
- * List of parameters this tool accepts
- */
- parameters: Array;
-
- provider_id: string;
-
- /**
- * ID of the tool group this tool belongs to
- */
- toolgroup_id: string;
-
- /**
- * Type of resource, always 'tool'
- */
- type: 'tool';
-
- /**
- * (Optional) Additional metadata about the tool
- */
- metadata?: { [key: string]: boolean | number | string | Array | unknown | null };
-
- provider_resource_id?: string;
-}
-
-export namespace Tool {
- /**
- * Parameter definition for a tool.
- */
- export interface Parameter {
- /**
- * Human-readable description of what the parameter does
- */
- description: string;
-
- /**
- * Name of the parameter
- */
- name: string;
-
- /**
- * Type of the parameter (e.g., string, integer)
- */
- parameter_type: string;
-
- /**
- * Whether this parameter is required for tool invocation
- */
- required: boolean;
-
- /**
- * (Optional) Default value for the parameter if not provided
- */
- default?: boolean | number | string | Array | unknown | null;
- }
-}
-
-/**
- * List of tools
- */
-export type ToolListResponse = Array;
+export type ToolListResponse = Array<ToolRuntimeAPI.ToolDef>;
export interface ToolListParams {
/**
@@ -127,10 +50,5 @@ export interface ToolListParams {
}
export declare namespace Tools {
- export {
- type ListToolsResponse as ListToolsResponse,
- type Tool as Tool,
- type ToolListResponse as ToolListResponse,
- type ToolListParams as ToolListParams,
- };
+ export { type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams };
}
diff --git a/src/resources/vector-dbs.ts b/src/resources/vector-dbs.ts
deleted file mode 100644
index 9c064d5..0000000
--- a/src/resources/vector-dbs.ts
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright (c) Meta Platforms, Inc. and affiliates.
-// All rights reserved.
-//
-// This source code is licensed under the terms described in the LICENSE file in
-// the root directory of this source tree.
-
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import * as Core from '../core';
-
-export class VectorDBs extends APIResource {
- /**
- * Get a vector database by its identifier.
- */
- retrieve(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/vector-dbs/${vectorDBId}`, options);
- }
-
- /**
- * List all vector databases.
- */
- list(options?: Core.RequestOptions): Core.APIPromise {
- return (
- this._client.get('/v1/vector-dbs', options) as Core.APIPromise<{ data: VectorDBListResponse }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Register a vector database.
- */
- register(
- body: VectorDBRegisterParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise