diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 091cfb1..f7014c3 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.10.0"
+ ".": "0.11.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 64eaa82..7000a67 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 36
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-c8152db455001be3f09a3bc60d63711699d2c2a4ea5f7bbc1d71726efda0fd9b.yml
-openapi_spec_hash: 97719df292ca220de5d35d36f9756b95
-config_hash: fdaf751580ba8a60e222e560847af1ac
+configured_endpoints: 40
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-ca7f807eec8b98ae96fdd0aa6844e268696ffd9219152aed652631c963626a7c.yml
+openapi_spec_hash: 85ecd2e4bb8d86a808aa03a9ada2d3c6
+config_hash: 73a926de3d35acf984c7a39bbf08c99d
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b204145..17223c7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## 0.11.0 (2026-01-13)
+
+Full Changelog: [v0.10.0...v0.11.0](https://github.com/ContextualAI/contextual-client-python/compare/v0.10.0...v0.11.0)
+
+### Features
+
+* **api:** manual updates ([9d0101f](https://github.com/ContextualAI/contextual-client-python/commit/9d0101ffe5e7af7104ef461b420112cb66a32a2d))
+
+
+### Bug Fixes
+
+* **compat:** update signatures of `model_dump` and `model_dump_json` for Pydantic v1 ([fdbee33](https://github.com/ContextualAI/contextual-client-python/commit/fdbee33a0cb13ef5bfc346bc969ff890c69c2c98))
+* ensure streams are always closed ([6bdc831](https://github.com/ContextualAI/contextual-client-python/commit/6bdc8314add1b629d570f4ad623540b12c4e9332))
+* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([831b67d](https://github.com/ContextualAI/contextual-client-python/commit/831b67dbc9acbb586bd2b9c822dbb01cac56f580))
+* use async_to_httpx_files in patch method ([04bc4cc](https://github.com/ContextualAI/contextual-client-python/commit/04bc4cc6900d37483546c7c754ffe459371e6ae0))
+
+
+### Chores
+
+* add missing docstrings ([9c019eb](https://github.com/ContextualAI/contextual-client-python/commit/9c019ebc453ca8ef0e2a587c57af09b995f2e581))
+* add Python 3.14 classifier and testing ([26541da](https://github.com/ContextualAI/contextual-client-python/commit/26541dab7cfa5d991a300fe0010bad88a4ecf733))
+* **deps:** mypy 1.18.1 has a regression, pin to 1.17 ([4566f14](https://github.com/ContextualAI/contextual-client-python/commit/4566f141302cad92947d2cd1ea927d44ffa441c7))
+* **docs:** use environment variables for authentication in code snippets ([92f7455](https://github.com/ContextualAI/contextual-client-python/commit/92f74555cddacdb3396094ac0742fb66f1c44bef))
+* **internal:** add `--fix` argument to lint script ([0264ded](https://github.com/ContextualAI/contextual-client-python/commit/0264ded1e8082533087b4307365e318d9f02398e))
+* **internal:** add missing files argument to base client ([44a2de1](https://github.com/ContextualAI/contextual-client-python/commit/44a2de1749524dfc21042d082d60f080953c3da3))
+* **internal:** codegen related update ([37f6758](https://github.com/ContextualAI/contextual-client-python/commit/37f6758c6b43ecbb642d84dc692084cb8daef0c0))
+* update lockfile ([9dd374a](https://github.com/ContextualAI/contextual-client-python/commit/9dd374a4936f2269bc05ff5c8c672acaae629f86))
+
## 0.10.0 (2025-11-11)
Full Changelog: [v0.9.0...v0.10.0](https://github.com/ContextualAI/contextual-client-python/compare/v0.9.0...v0.10.0)
diff --git a/LICENSE b/LICENSE
index 488ad4f..7f8bb61 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright 2025 Contextual AI
+ Copyright 2026 Contextual AI
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/README.md b/README.md
index ccdb4cc..7eb5680 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,7 @@ pip install contextual-client[aiohttp]
Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
```python
+import os
import asyncio
from contextual import DefaultAioHttpClient
from contextual import AsyncContextualAI
@@ -88,7 +89,7 @@ from contextual import AsyncContextualAI
async def main() -> None:
async with AsyncContextualAI(
- api_key="My API Key",
+ api_key=os.environ.get("CONTEXTUAL_API_KEY"), # This is the default and can be omitted
http_client=DefaultAioHttpClient(),
) as client:
create_agent_output = await client.agents.create(
diff --git a/api.md b/api.md
index bc19f43..cb297b4 100644
--- a/api.md
+++ b/api.md
@@ -1,14 +1,26 @@
+# Shared Types
+
+```python
+from contextual.types import ParsedBlock
+```
+
# Datastores
Types:
```python
from contextual.types import (
+ ChunkingConfiguration,
CreateDatastoreResponse,
Datastore,
DatastoreMetadata,
+ DatastoreParseConfiguration,
+ HTMLConfiguration,
ListDatastoresResponse,
+ UnstructuredDatastoreConfigModel,
DatastoreUpdateResponse,
+ DatastoreDeleteResponse,
+ DatastoreResetResponse,
)
```
@@ -17,9 +29,9 @@ Methods:
- client.datastores.create(\*\*params) -> CreateDatastoreResponse
- client.datastores.update(datastore_id, \*\*params) -> DatastoreUpdateResponse
- client.datastores.list(\*\*params) -> SyncDatastoresPage[Datastore]
-- client.datastores.delete(datastore_id) -> object
+- client.datastores.delete(datastore_id) -> DatastoreDeleteResponse
- client.datastores.metadata(datastore_id) -> DatastoreMetadata
-- client.datastores.reset(datastore_id) -> object
+- client.datastores.reset(datastore_id) -> DatastoreResetResponse
## Documents
@@ -32,6 +44,7 @@ from contextual.types.datastores import (
DocumentMetadata,
IngestionResponse,
ListDocumentsResponse,
+ DocumentDeleteResponse,
DocumentGetParseResultResponse,
)
```
@@ -39,7 +52,7 @@ from contextual.types.datastores import (
Methods:
- client.datastores.documents.list(datastore_id, \*\*params) -> SyncDocumentsPage[DocumentMetadata]
-- client.datastores.documents.delete(document_id, \*, datastore_id) -> object
+- client.datastores.documents.delete(document_id, \*, datastore_id) -> DocumentDeleteResponse
- client.datastores.documents.get_parse_result(document_id, \*, datastore_id, \*\*params) -> DocumentGetParseResultResponse
- client.datastores.documents.ingest(datastore_id, \*\*params) -> IngestionResponse
- client.datastores.documents.metadata(document_id, \*, datastore_id) -> DocumentMetadata
@@ -58,12 +71,25 @@ Methods:
- client.datastores.contents.list(datastore_id, \*\*params) -> SyncContentsPage[ContentListResponse]
- client.datastores.contents.metadata(content_id, \*, datastore_id, \*\*params) -> ContentMetadataResponse
+## Chunks
+
+Types:
+
+```python
+from contextual.types.datastores import ChunkUpdateContentResponse
+```
+
+Methods:
+
+- client.datastores.chunks.update_content(content_id, \*, datastore_id, \*\*params) -> ChunkUpdateContentResponse
+
# Agents
Types:
```python
from contextual.types import (
+ ACLConfig,
Agent,
AgentConfigs,
AgentMetadata,
@@ -72,7 +98,11 @@ from contextual.types import (
GenerateResponseConfig,
GlobalConfig,
ListAgentsResponse,
+ ReformulationConfig,
RetrievalConfig,
+ TranslationConfig,
+ AgentUpdateResponse,
+ AgentDeleteResponse,
AgentMetadataResponse,
)
```
@@ -80,12 +110,13 @@ from contextual.types import (
Methods:
- client.agents.create(\*\*params) -> CreateAgentOutput
-- client.agents.update(agent_id, \*\*params) -> object
+- client.agents.update(agent_id, \*\*params) -> AgentUpdateResponse
- client.agents.list(\*\*params) -> SyncPage[Agent]
-- client.agents.delete(agent_id) -> object
+- client.agents.delete(agent_id) -> AgentDeleteResponse
- client.agents.copy(agent_id) -> CreateAgentOutput
- client.agents.metadata(agent_id) -> AgentMetadataResponse
- client.agents.reset(agent_id) -> object
+- client.agents.save_template(agent_id, \*\*params) -> object
## Query
@@ -107,6 +138,19 @@ Methods:
- client.agents.query.metrics(agent_id, \*\*params) -> QueryMetricsResponse
- client.agents.query.retrieval_info(message_id, \*, agent_id, \*\*params) -> RetrievalInfoResponse
+## Templates
+
+Types:
+
+```python
+from contextual.types.agents import TemplateListResponse
+```
+
+Methods:
+
+- client.agents.templates.retrieve(template) -> AgentMetadata
+- client.agents.templates.list() -> TemplateListResponse
+
# Users
Types:
diff --git a/pyproject.toml b/pyproject.toml
index 9aba0a8..8e58af4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,20 +1,22 @@
[project]
name = "contextual-client"
-version = "0.10.0"
+version = "0.11.0"
description = "The official Python library for the Contextual AI API"
dynamic = ["readme"]
license = "Apache-2.0"
authors = [
{ name = "Contextual AI", email = "support@contextual.ai" },
]
+
dependencies = [
- "httpx>=0.23.0, <1",
- "pydantic>=1.9.0, <3",
- "typing-extensions>=4.10, <5",
- "anyio>=3.5.0, <5",
- "distro>=1.7.0, <2",
- "sniffio",
+ "httpx>=0.23.0, <1",
+ "pydantic>=1.9.0, <3",
+ "typing-extensions>=4.10, <5",
+ "anyio>=3.5.0, <5",
+ "distro>=1.7.0, <2",
+ "sniffio",
]
+
requires-python = ">= 3.9"
classifiers = [
"Typing :: Typed",
@@ -24,6 +26,7 @@ classifiers = [
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: MacOS",
@@ -45,7 +48,7 @@ managed = true
# version pins are in requirements-dev.lock
dev-dependencies = [
"pyright==1.1.399",
- "mypy",
+ "mypy==1.17",
"respx",
"pytest",
"pytest-asyncio",
diff --git a/requirements-dev.lock b/requirements-dev.lock
index d95129c..1cce592 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -12,40 +12,45 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.12.8
+aiohttp==3.13.2
# via contextual-client
# via httpx-aiohttp
-aiosignal==1.3.2
+aiosignal==1.4.0
# via aiohttp
-annotated-types==0.6.0
+annotated-types==0.7.0
# via pydantic
-anyio==4.4.0
+anyio==4.12.0
# via contextual-client
# via httpx
-argcomplete==3.1.2
+argcomplete==3.6.3
# via nox
async-timeout==5.0.1
# via aiohttp
-attrs==25.3.0
+attrs==25.4.0
# via aiohttp
-certifi==2023.7.22
+ # via nox
+backports-asyncio-runner==1.2.0
+ # via pytest-asyncio
+certifi==2025.11.12
# via httpcore
# via httpx
-colorlog==6.7.0
+colorlog==6.10.1
+ # via nox
+dependency-groups==1.3.1
# via nox
-dirty-equals==0.6.0
-distlib==0.3.7
+dirty-equals==0.11
+distlib==0.4.0
# via virtualenv
-distro==1.8.0
+distro==1.9.0
# via contextual-client
-exceptiongroup==1.2.2
+exceptiongroup==1.3.1
# via anyio
# via pytest
-execnet==2.1.1
+execnet==2.1.2
# via pytest-xdist
-filelock==3.12.4
+filelock==3.19.1
# via virtualenv
-frozenlist==1.6.2
+frozenlist==1.8.0
# via aiohttp
# via aiosignal
h11==0.16.0
@@ -58,80 +63,87 @@ httpx==0.28.1
# via respx
httpx-aiohttp==0.1.9
# via contextual-client
-idna==3.4
+humanize==4.13.0
+ # via nox
+idna==3.11
# via anyio
# via httpx
# via yarl
-importlib-metadata==7.0.0
-iniconfig==2.0.0
+importlib-metadata==8.7.0
+iniconfig==2.1.0
# via pytest
markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
# via markdown-it-py
-multidict==6.4.4
+multidict==6.7.0
# via aiohttp
# via yarl
-mypy==1.14.1
-mypy-extensions==1.0.0
+mypy==1.17.0
+mypy-extensions==1.1.0
# via mypy
-nodeenv==1.8.0
+nodeenv==1.9.1
# via pyright
-nox==2023.4.22
-packaging==23.2
+nox==2025.11.12
+packaging==25.0
+ # via dependency-groups
# via nox
# via pytest
-platformdirs==3.11.0
+pathspec==0.12.1
+ # via mypy
+platformdirs==4.4.0
# via virtualenv
-pluggy==1.5.0
+pluggy==1.6.0
# via pytest
-propcache==0.3.1
+propcache==0.4.1
# via aiohttp
# via yarl
-pydantic==2.11.9
+pydantic==2.12.5
# via contextual-client
-pydantic-core==2.33.2
+pydantic-core==2.41.5
# via pydantic
-pygments==2.18.0
+pygments==2.19.2
+ # via pytest
# via rich
pyright==1.1.399
-pytest==8.3.3
+pytest==8.4.2
# via pytest-asyncio
# via pytest-xdist
-pytest-asyncio==0.24.0
-pytest-xdist==3.7.0
-python-dateutil==2.8.2
+pytest-asyncio==1.2.0
+pytest-xdist==3.8.0
+python-dateutil==2.9.0.post0
# via time-machine
-pytz==2023.3.post1
- # via dirty-equals
respx==0.22.0
-rich==13.7.1
-ruff==0.9.4
-setuptools==68.2.2
- # via nodeenv
-six==1.16.0
+rich==14.2.0
+ruff==0.14.7
+six==1.17.0
# via python-dateutil
-sniffio==1.3.0
- # via anyio
+sniffio==1.3.1
# via contextual-client
-time-machine==2.9.0
-tomli==2.0.2
+time-machine==2.19.0
+tomli==2.3.0
+ # via dependency-groups
# via mypy
+ # via nox
# via pytest
-typing-extensions==4.12.2
+typing-extensions==4.15.0
+ # via aiosignal
# via anyio
# via contextual-client
+ # via exceptiongroup
# via multidict
# via mypy
# via pydantic
# via pydantic-core
# via pyright
+ # via pytest-asyncio
# via typing-inspection
-typing-inspection==0.4.1
+ # via virtualenv
+typing-inspection==0.4.2
# via pydantic
-virtualenv==20.24.5
+virtualenv==20.35.4
# via nox
-yarl==1.20.0
+yarl==1.22.0
# via aiohttp
-zipp==3.17.0
+zipp==3.23.0
# via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index b5c6c83..4afa4d7 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -12,28 +12,28 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.12.8
+aiohttp==3.13.2
# via contextual-client
# via httpx-aiohttp
-aiosignal==1.3.2
+aiosignal==1.4.0
# via aiohttp
-annotated-types==0.6.0
+annotated-types==0.7.0
# via pydantic
-anyio==4.4.0
+anyio==4.12.0
# via contextual-client
# via httpx
async-timeout==5.0.1
# via aiohttp
-attrs==25.3.0
+attrs==25.4.0
# via aiohttp
-certifi==2023.7.22
+certifi==2025.11.12
# via httpcore
# via httpx
-distro==1.8.0
+distro==1.9.0
# via contextual-client
-exceptiongroup==1.2.2
+exceptiongroup==1.3.1
# via anyio
-frozenlist==1.6.2
+frozenlist==1.8.0
# via aiohttp
# via aiosignal
h11==0.16.0
@@ -45,31 +45,32 @@ httpx==0.28.1
# via httpx-aiohttp
httpx-aiohttp==0.1.9
# via contextual-client
-idna==3.4
+idna==3.11
# via anyio
# via httpx
# via yarl
-multidict==6.4.4
+multidict==6.7.0
# via aiohttp
# via yarl
-propcache==0.3.1
+propcache==0.4.1
# via aiohttp
# via yarl
-pydantic==2.11.9
+pydantic==2.12.5
# via contextual-client
-pydantic-core==2.33.2
+pydantic-core==2.41.5
# via pydantic
-sniffio==1.3.0
- # via anyio
+sniffio==1.3.1
# via contextual-client
-typing-extensions==4.12.2
+typing-extensions==4.15.0
+ # via aiosignal
# via anyio
# via contextual-client
+ # via exceptiongroup
# via multidict
# via pydantic
# via pydantic-core
# via typing-inspection
-typing-inspection==0.4.1
+typing-inspection==0.4.2
# via pydantic
-yarl==1.20.0
+yarl==1.22.0
# via aiohttp
diff --git a/scripts/lint b/scripts/lint
index 7f5dd6b..229a594 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -4,8 +4,13 @@ set -e
cd "$(dirname "$0")/.."
-echo "==> Running lints"
-rye run lint
+if [ "$1" = "--fix" ]; then
+ echo "==> Running lints with --fix"
+ rye run fix:ruff
+else
+ echo "==> Running lints"
+ rye run lint
+fi
echo "==> Making sure it imports"
rye run python -c 'import contextual'
diff --git a/src/contextual/_base_client.py b/src/contextual/_base_client.py
index c7fd3cb..025255f 100644
--- a/src/contextual/_base_client.py
+++ b/src/contextual/_base_client.py
@@ -1247,9 +1247,12 @@ def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+ opts = FinalRequestOptions.construct(
+ method="patch", url=path, json_data=body, files=to_httpx_files(files), **options
+ )
return self.request(cast_to, opts)
def put(
@@ -1767,9 +1770,12 @@ async def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+ opts = FinalRequestOptions.construct(
+ method="patch", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ )
return await self.request(cast_to, opts)
async def put(
diff --git a/src/contextual/_models.py b/src/contextual/_models.py
index fcec2cf..ca9500b 100644
--- a/src/contextual/_models.py
+++ b/src/contextual/_models.py
@@ -257,15 +257,16 @@ def model_dump(
mode: Literal["json", "python"] | str = "python",
include: IncEx | None = None,
exclude: IncEx | None = None,
+ context: Any | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
- serialize_as_any: bool = False,
fallback: Callable[[Any], Any] | None = None,
+ serialize_as_any: bool = False,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
@@ -273,16 +274,24 @@ def model_dump(
Args:
mode: The mode in which `to_python` should run.
- If mode is 'json', the dictionary will only contain JSON serializable types.
- If mode is 'python', the dictionary may contain any Python objects.
- include: A list of fields to include in the output.
- exclude: A list of fields to exclude from the output.
+ If mode is 'json', the output will only contain JSON serializable types.
+ If mode is 'python', the output may contain non-JSON-serializable Python objects.
+ include: A set of fields to include in the output.
+ exclude: A set of fields to exclude from the output.
+ context: Additional context to pass to the serializer.
by_alias: Whether to use the field's alias in the dictionary key if defined.
- exclude_unset: Whether to exclude fields that are unset or None from the output.
- exclude_defaults: Whether to exclude fields that are set to their default value from the output.
- exclude_none: Whether to exclude fields that have a value of `None` from the output.
- round_trip: Whether to enable serialization and deserialization round-trip support.
- warnings: Whether to log warnings when invalid fields are encountered.
+ exclude_unset: Whether to exclude fields that have not been explicitly set.
+ exclude_defaults: Whether to exclude fields that are set to their default value.
+ exclude_none: Whether to exclude fields that have a value of `None`.
+ exclude_computed_fields: Whether to exclude computed fields.
+ While this can be useful for round-tripping, it is usually recommended to use the dedicated
+ `round_trip` parameter instead.
+ round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
+ warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
+ "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
+ fallback: A function to call when an unknown value is encountered. If not provided,
+ a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
+ serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A dictionary representation of the model.
@@ -299,6 +308,8 @@ def model_dump(
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
dumped = super().dict( # pyright: ignore[reportDeprecated]
include=include,
exclude=exclude,
@@ -315,15 +326,17 @@ def model_dump_json(
self,
*,
indent: int | None = None,
+ ensure_ascii: bool = False,
include: IncEx | None = None,
exclude: IncEx | None = None,
+ context: Any | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> str:
@@ -355,6 +368,10 @@ def model_dump_json(
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
+ if ensure_ascii != False:
+ raise ValueError("ensure_ascii is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
return super().json( # type: ignore[reportDeprecated]
indent=indent,
include=include,
diff --git a/src/contextual/_streaming.py b/src/contextual/_streaming.py
index deb13c2..837ce89 100644
--- a/src/contextual/_streaming.py
+++ b/src/contextual/_streaming.py
@@ -54,11 +54,12 @@ def __stream__(self) -> Iterator[_T]:
process_data = self._client._process_response_data
iterator = self._iter_events()
- for sse in iterator:
- yield process_data(data=sse.json(), cast_to=cast_to, response=response)
-
- # As we might not fully consume the response stream, we need to close it explicitly
- response.close()
+ try:
+ for sse in iterator:
+ yield process_data(data=sse.json(), cast_to=cast_to, response=response)
+ finally:
+ # Ensure the response is closed even if the consumer doesn't read all data
+ response.close()
def __enter__(self) -> Self:
return self
@@ -117,11 +118,12 @@ async def __stream__(self) -> AsyncIterator[_T]:
process_data = self._client._process_response_data
iterator = self._iter_events()
- async for sse in iterator:
- yield process_data(data=sse.json(), cast_to=cast_to, response=response)
-
- # As we might not fully consume the response stream, we need to close it explicitly
- await response.aclose()
+ try:
+ async for sse in iterator:
+ yield process_data(data=sse.json(), cast_to=cast_to, response=response)
+ finally:
+ # Ensure the response is closed even if the consumer doesn't read all data
+ await response.aclose()
async def __aenter__(self) -> Self:
return self
diff --git a/src/contextual/_types.py b/src/contextual/_types.py
index a6a1882..89db9e1 100644
--- a/src/contextual/_types.py
+++ b/src/contextual/_types.py
@@ -243,6 +243,9 @@ class HttpxSendArgs(TypedDict, total=False):
if TYPE_CHECKING:
# This works because str.__contains__ does not accept object (either in typeshed or at runtime)
# https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
+ #
+ # Note: index() and count() methods are intentionally omitted to allow pyright to properly
+ # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr.
class SequenceNotStr(Protocol[_T_co]):
@overload
def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
@@ -251,8 +254,6 @@ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
def __contains__(self, value: object, /) -> bool: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T_co]: ...
- def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
- def count(self, value: Any, /) -> int: ...
def __reversed__(self) -> Iterator[_T_co]: ...
else:
# just point this to a normal `Sequence` at runtime to avoid having to special case
diff --git a/src/contextual/_version.py b/src/contextual/_version.py
index 5382f19..268a911 100644
--- a/src/contextual/_version.py
+++ b/src/contextual/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "contextual"
-__version__ = "0.10.0" # x-release-please-version
+__version__ = "0.11.0" # x-release-please-version
diff --git a/src/contextual/resources/agents/__init__.py b/src/contextual/resources/agents/__init__.py
index b3996d0..69aa01e 100644
--- a/src/contextual/resources/agents/__init__.py
+++ b/src/contextual/resources/agents/__init__.py
@@ -16,6 +16,14 @@
AgentsResourceWithStreamingResponse,
AsyncAgentsResourceWithStreamingResponse,
)
+from .templates import (
+ TemplatesResource,
+ AsyncTemplatesResource,
+ TemplatesResourceWithRawResponse,
+ AsyncTemplatesResourceWithRawResponse,
+ TemplatesResourceWithStreamingResponse,
+ AsyncTemplatesResourceWithStreamingResponse,
+)
__all__ = [
"QueryResource",
@@ -24,6 +32,12 @@
"AsyncQueryResourceWithRawResponse",
"QueryResourceWithStreamingResponse",
"AsyncQueryResourceWithStreamingResponse",
+ "TemplatesResource",
+ "AsyncTemplatesResource",
+ "TemplatesResourceWithRawResponse",
+ "AsyncTemplatesResourceWithRawResponse",
+ "TemplatesResourceWithStreamingResponse",
+ "AsyncTemplatesResourceWithStreamingResponse",
"AgentsResource",
"AsyncAgentsResource",
"AgentsResourceWithRawResponse",
diff --git a/src/contextual/resources/agents/agents.py b/src/contextual/resources/agents/agents.py
index 0bce0eb..9608344 100644
--- a/src/contextual/resources/agents/agents.py
+++ b/src/contextual/resources/agents/agents.py
@@ -14,10 +14,23 @@
QueryResourceWithStreamingResponse,
AsyncQueryResourceWithStreamingResponse,
)
-from ...types import agent_list_params, agent_create_params, agent_update_params
+from ...types import (
+ agent_list_params,
+ agent_create_params,
+ agent_update_params,
+ agent_save_template_params,
+)
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
+from .templates import (
+ TemplatesResource,
+ AsyncTemplatesResource,
+ TemplatesResourceWithRawResponse,
+ AsyncTemplatesResourceWithRawResponse,
+ TemplatesResourceWithStreamingResponse,
+ AsyncTemplatesResourceWithStreamingResponse,
+)
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
to_raw_response_wrapper,
@@ -30,6 +43,8 @@
from ..._base_client import AsyncPaginator, make_request_options
from ...types.agent_configs_param import AgentConfigsParam
from ...types.create_agent_output import CreateAgentOutput
+from ...types.agent_delete_response import AgentDeleteResponse
+from ...types.agent_update_response import AgentUpdateResponse
from ...types.agent_metadata_response import AgentMetadataResponse
__all__ = ["AgentsResource", "AsyncAgentsResource"]
@@ -40,6 +55,10 @@ class AgentsResource(SyncAPIResource):
def query(self) -> QueryResource:
return QueryResource(self._client)
+ @cached_property
+ def templates(self) -> TemplatesResource:
+ return TemplatesResource(self._client)
+
@cached_property
def with_raw_response(self) -> AgentsResourceWithRawResponse:
"""
@@ -174,7 +193,7 @@ def update(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> AgentUpdateResponse:
"""
Modify a given `Agent` to utilize the provided configuration.
@@ -236,7 +255,7 @@ def update(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=AgentUpdateResponse,
)
def list(
@@ -297,7 +316,7 @@ def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> AgentDeleteResponse:
"""Delete a given `Agent`.
This is an irreversible operation.
@@ -324,7 +343,7 @@ def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=AgentDeleteResponse,
)
def copy(
@@ -438,12 +457,55 @@ def reset(
cast_to=object,
)
+ def save_template(
+ self,
+ agent_id: str,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> object:
+ """
+ Save Template
+
+ Args:
+ agent_id: ID of the agent
+
+ name: The name of the template
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ return self._post(
+ f"/agents/{agent_id}/template",
+ body=maybe_transform({"name": name}, agent_save_template_params.AgentSaveTemplateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
class AsyncAgentsResource(AsyncAPIResource):
@cached_property
def query(self) -> AsyncQueryResource:
return AsyncQueryResource(self._client)
+ @cached_property
+ def templates(self) -> AsyncTemplatesResource:
+ return AsyncTemplatesResource(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse:
"""
@@ -578,7 +640,7 @@ async def update(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> AgentUpdateResponse:
"""
Modify a given `Agent` to utilize the provided configuration.
@@ -640,7 +702,7 @@ async def update(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=AgentUpdateResponse,
)
def list(
@@ -701,7 +763,7 @@ async def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> AgentDeleteResponse:
"""Delete a given `Agent`.
This is an irreversible operation.
@@ -728,7 +790,7 @@ async def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=AgentDeleteResponse,
)
async def copy(
@@ -842,6 +904,45 @@ async def reset(
cast_to=object,
)
+ async def save_template(
+ self,
+ agent_id: str,
+ *,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> object:
+ """
+ Save Template
+
+ Args:
+ agent_id: ID of the agent
+
+ name: The name of the template
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ return await self._post(
+ f"/agents/{agent_id}/template",
+ body=await async_maybe_transform({"name": name}, agent_save_template_params.AgentSaveTemplateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
class AgentsResourceWithRawResponse:
def __init__(self, agents: AgentsResource) -> None:
@@ -868,11 +969,18 @@ def __init__(self, agents: AgentsResource) -> None:
self.reset = to_raw_response_wrapper(
agents.reset,
)
+ self.save_template = to_raw_response_wrapper(
+ agents.save_template,
+ )
@cached_property
def query(self) -> QueryResourceWithRawResponse:
return QueryResourceWithRawResponse(self._agents.query)
+ @cached_property
+ def templates(self) -> TemplatesResourceWithRawResponse:
+ return TemplatesResourceWithRawResponse(self._agents.templates)
+
class AsyncAgentsResourceWithRawResponse:
def __init__(self, agents: AsyncAgentsResource) -> None:
@@ -899,11 +1007,18 @@ def __init__(self, agents: AsyncAgentsResource) -> None:
self.reset = async_to_raw_response_wrapper(
agents.reset,
)
+ self.save_template = async_to_raw_response_wrapper(
+ agents.save_template,
+ )
@cached_property
def query(self) -> AsyncQueryResourceWithRawResponse:
return AsyncQueryResourceWithRawResponse(self._agents.query)
+ @cached_property
+ def templates(self) -> AsyncTemplatesResourceWithRawResponse:
+ return AsyncTemplatesResourceWithRawResponse(self._agents.templates)
+
class AgentsResourceWithStreamingResponse:
def __init__(self, agents: AgentsResource) -> None:
@@ -930,11 +1045,18 @@ def __init__(self, agents: AgentsResource) -> None:
self.reset = to_streamed_response_wrapper(
agents.reset,
)
+ self.save_template = to_streamed_response_wrapper(
+ agents.save_template,
+ )
@cached_property
def query(self) -> QueryResourceWithStreamingResponse:
return QueryResourceWithStreamingResponse(self._agents.query)
+ @cached_property
+ def templates(self) -> TemplatesResourceWithStreamingResponse:
+ return TemplatesResourceWithStreamingResponse(self._agents.templates)
+
class AsyncAgentsResourceWithStreamingResponse:
def __init__(self, agents: AsyncAgentsResource) -> None:
@@ -961,7 +1083,14 @@ def __init__(self, agents: AsyncAgentsResource) -> None:
self.reset = async_to_streamed_response_wrapper(
agents.reset,
)
+ self.save_template = async_to_streamed_response_wrapper(
+ agents.save_template,
+ )
@cached_property
def query(self) -> AsyncQueryResourceWithStreamingResponse:
return AsyncQueryResourceWithStreamingResponse(self._agents.query)
+
+ @cached_property
+ def templates(self) -> AsyncTemplatesResourceWithStreamingResponse:
+ return AsyncTemplatesResourceWithStreamingResponse(self._agents.templates)
diff --git a/src/contextual/resources/agents/templates.py b/src/contextual/resources/agents/templates.py
new file mode 100644
index 0000000..a72eae5
--- /dev/null
+++ b/src/contextual/resources/agents/templates.py
@@ -0,0 +1,218 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Query, Headers, NotGiven, not_given
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agent_metadata import AgentMetadata
+from ...types.agents.template_list_response import TemplateListResponse
+
+__all__ = ["TemplatesResource", "AsyncTemplatesResource"]
+
+
+class TemplatesResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> TemplatesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return TemplatesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> TemplatesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return TemplatesResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ template: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AgentMetadata:
+ """
+ Get Template Configuration
+
+ Args:
+ template: Template for which to get config
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not template:
+ raise ValueError(f"Expected a non-empty value for `template` but received {template!r}")
+ return self._get(
+ f"/agents/templates/{template}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AgentMetadata,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TemplateListResponse:
+ """Retrieve a list of all available Templates."""
+ return self._get(
+ "/agents/templates",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=TemplateListResponse,
+ )
+
+
+class AsyncTemplatesResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncTemplatesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncTemplatesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncTemplatesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return AsyncTemplatesResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ template: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AgentMetadata:
+ """
+ Get Template Configuration
+
+ Args:
+ template: Template for which to get config
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not template:
+ raise ValueError(f"Expected a non-empty value for `template` but received {template!r}")
+ return await self._get(
+ f"/agents/templates/{template}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AgentMetadata,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TemplateListResponse:
+ """Retrieve a list of all available Templates."""
+ return await self._get(
+ "/agents/templates",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=TemplateListResponse,
+ )
+
+
+class TemplatesResourceWithRawResponse:
+ def __init__(self, templates: TemplatesResource) -> None:
+ self._templates = templates
+
+ self.retrieve = to_raw_response_wrapper(
+ templates.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ templates.list,
+ )
+
+
+class AsyncTemplatesResourceWithRawResponse:
+ def __init__(self, templates: AsyncTemplatesResource) -> None:
+ self._templates = templates
+
+ self.retrieve = async_to_raw_response_wrapper(
+ templates.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ templates.list,
+ )
+
+
+class TemplatesResourceWithStreamingResponse:
+ def __init__(self, templates: TemplatesResource) -> None:
+ self._templates = templates
+
+ self.retrieve = to_streamed_response_wrapper(
+ templates.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ templates.list,
+ )
+
+
+class AsyncTemplatesResourceWithStreamingResponse:
+ def __init__(self, templates: AsyncTemplatesResource) -> None:
+ self._templates = templates
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ templates.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ templates.list,
+ )
diff --git a/src/contextual/resources/datastores/__init__.py b/src/contextual/resources/datastores/__init__.py
index 2b127f3..2617099 100644
--- a/src/contextual/resources/datastores/__init__.py
+++ b/src/contextual/resources/datastores/__init__.py
@@ -1,5 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from .chunks import (
+ ChunksResource,
+ AsyncChunksResource,
+ ChunksResourceWithRawResponse,
+ AsyncChunksResourceWithRawResponse,
+ ChunksResourceWithStreamingResponse,
+ AsyncChunksResourceWithStreamingResponse,
+)
from .contents import (
ContentsResource,
AsyncContentsResource,
@@ -38,6 +46,12 @@
"AsyncContentsResourceWithRawResponse",
"ContentsResourceWithStreamingResponse",
"AsyncContentsResourceWithStreamingResponse",
+ "ChunksResource",
+ "AsyncChunksResource",
+ "ChunksResourceWithRawResponse",
+ "AsyncChunksResourceWithRawResponse",
+ "ChunksResourceWithStreamingResponse",
+ "AsyncChunksResourceWithStreamingResponse",
"DatastoresResource",
"AsyncDatastoresResource",
"DatastoresResourceWithRawResponse",
diff --git a/src/contextual/resources/datastores/chunks.py b/src/contextual/resources/datastores/chunks.py
new file mode 100644
index 0000000..640c55a
--- /dev/null
+++ b/src/contextual/resources/datastores/chunks.py
@@ -0,0 +1,193 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Query, Headers, NotGiven, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.datastores import chunk_update_content_params
+from ...types.datastores.chunk_update_content_response import ChunkUpdateContentResponse
+
+__all__ = ["ChunksResource", "AsyncChunksResource"]
+
+
+class ChunksResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ChunksResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return ChunksResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ChunksResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return ChunksResourceWithStreamingResponse(self)
+
+ def update_content(
+ self,
+ content_id: str,
+ *,
+ datastore_id: str,
+ content: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ChunkUpdateContentResponse:
+ """Edit the content of a specific chunk in a datastore.
+
+ This operation updates the
+ chunk's text content and regenerates its embeddings.
+
+ Args:
+ datastore_id: Datastore ID of the datastore containing the chunk
+
+ content_id: Content ID of the chunk to edit
+
+ content: The new content text for the chunk
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not datastore_id:
+ raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}")
+ if not content_id:
+ raise ValueError(f"Expected a non-empty value for `content_id` but received {content_id!r}")
+ return self._put(
+ f"/datastores/{datastore_id}/chunks/{content_id}/content",
+ body=maybe_transform({"content": content}, chunk_update_content_params.ChunkUpdateContentParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ChunkUpdateContentResponse,
+ )
+
+
+class AsyncChunksResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncChunksResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncChunksResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncChunksResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return AsyncChunksResourceWithStreamingResponse(self)
+
+ async def update_content(
+ self,
+ content_id: str,
+ *,
+ datastore_id: str,
+ content: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ChunkUpdateContentResponse:
+ """Edit the content of a specific chunk in a datastore.
+
+ This operation updates the
+ chunk's text content and regenerates its embeddings.
+
+ Args:
+ datastore_id: Datastore ID of the datastore containing the chunk
+
+ content_id: Content ID of the chunk to edit
+
+ content: The new content text for the chunk
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not datastore_id:
+ raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}")
+ if not content_id:
+ raise ValueError(f"Expected a non-empty value for `content_id` but received {content_id!r}")
+ return await self._put(
+ f"/datastores/{datastore_id}/chunks/{content_id}/content",
+ body=await async_maybe_transform(
+ {"content": content}, chunk_update_content_params.ChunkUpdateContentParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ChunkUpdateContentResponse,
+ )
+
+
+class ChunksResourceWithRawResponse:
+ def __init__(self, chunks: ChunksResource) -> None:
+ self._chunks = chunks
+
+ self.update_content = to_raw_response_wrapper(
+ chunks.update_content,
+ )
+
+
+class AsyncChunksResourceWithRawResponse:
+ def __init__(self, chunks: AsyncChunksResource) -> None:
+ self._chunks = chunks
+
+ self.update_content = async_to_raw_response_wrapper(
+ chunks.update_content,
+ )
+
+
+class ChunksResourceWithStreamingResponse:
+ def __init__(self, chunks: ChunksResource) -> None:
+ self._chunks = chunks
+
+ self.update_content = to_streamed_response_wrapper(
+ chunks.update_content,
+ )
+
+
+class AsyncChunksResourceWithStreamingResponse:
+ def __init__(self, chunks: AsyncChunksResource) -> None:
+ self._chunks = chunks
+
+ self.update_content = async_to_streamed_response_wrapper(
+ chunks.update_content,
+ )
diff --git a/src/contextual/resources/datastores/datastores.py b/src/contextual/resources/datastores/datastores.py
index 5442461..bbcd7b3 100644
--- a/src/contextual/resources/datastores/datastores.py
+++ b/src/contextual/resources/datastores/datastores.py
@@ -4,7 +4,19 @@
import httpx
-from ...types import datastore_list_params, datastore_create_params, datastore_update_params
+from .chunks import (
+ ChunksResource,
+ AsyncChunksResource,
+ ChunksResourceWithRawResponse,
+ AsyncChunksResourceWithRawResponse,
+ ChunksResourceWithStreamingResponse,
+ AsyncChunksResourceWithStreamingResponse,
+)
+from ...types import (
+ datastore_list_params,
+ datastore_create_params,
+ datastore_update_params,
+)
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from .contents import (
@@ -35,8 +47,11 @@
from ..._base_client import AsyncPaginator, make_request_options
from ...types.datastore import Datastore
from ...types.datastore_metadata import DatastoreMetadata
+from ...types.datastore_reset_response import DatastoreResetResponse
from ...types.create_datastore_response import CreateDatastoreResponse
+from ...types.datastore_delete_response import DatastoreDeleteResponse
from ...types.datastore_update_response import DatastoreUpdateResponse
+from ...types.unstructured_datastore_config_model_param import UnstructuredDatastoreConfigModelParam
__all__ = ["DatastoresResource", "AsyncDatastoresResource"]
@@ -50,6 +65,10 @@ def documents(self) -> DocumentsResource:
def contents(self) -> ContentsResource:
return ContentsResource(self._client)
+ @cached_property
+ def chunks(self) -> ChunksResource:
+ return ChunksResource(self._client)
+
@cached_property
def with_raw_response(self) -> DatastoresResourceWithRawResponse:
"""
@@ -73,7 +92,7 @@ def create(
self,
*,
name: str,
- configuration: datastore_create_params.Configuration | Omit = omit,
+ configuration: UnstructuredDatastoreConfigModelParam | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -131,7 +150,7 @@ def update(
self,
datastore_id: str,
*,
- configuration: datastore_update_params.Configuration | Omit = omit,
+ configuration: UnstructuredDatastoreConfigModelParam | Omit = omit,
name: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -243,7 +262,7 @@ def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> DatastoreDeleteResponse:
"""Delete a given `Datastore`, including all the documents ingested into it.
This
@@ -270,7 +289,7 @@ def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=DatastoreDeleteResponse,
)
def metadata(
@@ -319,7 +338,7 @@ def reset(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> DatastoreResetResponse:
"""Reset the give `Datastore`.
This operation is irreversible and it deletes all
@@ -343,7 +362,7 @@ def reset(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=DatastoreResetResponse,
)
@@ -356,6 +375,10 @@ def documents(self) -> AsyncDocumentsResource:
def contents(self) -> AsyncContentsResource:
return AsyncContentsResource(self._client)
+ @cached_property
+ def chunks(self) -> AsyncChunksResource:
+ return AsyncChunksResource(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncDatastoresResourceWithRawResponse:
"""
@@ -379,7 +402,7 @@ async def create(
self,
*,
name: str,
- configuration: datastore_create_params.Configuration | Omit = omit,
+ configuration: UnstructuredDatastoreConfigModelParam | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -437,7 +460,7 @@ async def update(
self,
datastore_id: str,
*,
- configuration: datastore_update_params.Configuration | Omit = omit,
+ configuration: UnstructuredDatastoreConfigModelParam | Omit = omit,
name: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -549,7 +572,7 @@ async def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> DatastoreDeleteResponse:
"""Delete a given `Datastore`, including all the documents ingested into it.
This
@@ -576,7 +599,7 @@ async def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=DatastoreDeleteResponse,
)
async def metadata(
@@ -625,7 +648,7 @@ async def reset(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> DatastoreResetResponse:
"""Reset the give `Datastore`.
This operation is irreversible and it deletes all
@@ -649,7 +672,7 @@ async def reset(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=DatastoreResetResponse,
)
@@ -684,6 +707,10 @@ def documents(self) -> DocumentsResourceWithRawResponse:
def contents(self) -> ContentsResourceWithRawResponse:
return ContentsResourceWithRawResponse(self._datastores.contents)
+ @cached_property
+ def chunks(self) -> ChunksResourceWithRawResponse:
+ return ChunksResourceWithRawResponse(self._datastores.chunks)
+
class AsyncDatastoresResourceWithRawResponse:
def __init__(self, datastores: AsyncDatastoresResource) -> None:
@@ -716,6 +743,10 @@ def documents(self) -> AsyncDocumentsResourceWithRawResponse:
def contents(self) -> AsyncContentsResourceWithRawResponse:
return AsyncContentsResourceWithRawResponse(self._datastores.contents)
+ @cached_property
+ def chunks(self) -> AsyncChunksResourceWithRawResponse:
+ return AsyncChunksResourceWithRawResponse(self._datastores.chunks)
+
class DatastoresResourceWithStreamingResponse:
def __init__(self, datastores: DatastoresResource) -> None:
@@ -748,6 +779,10 @@ def documents(self) -> DocumentsResourceWithStreamingResponse:
def contents(self) -> ContentsResourceWithStreamingResponse:
return ContentsResourceWithStreamingResponse(self._datastores.contents)
+ @cached_property
+ def chunks(self) -> ChunksResourceWithStreamingResponse:
+ return ChunksResourceWithStreamingResponse(self._datastores.chunks)
+
class AsyncDatastoresResourceWithStreamingResponse:
def __init__(self, datastores: AsyncDatastoresResource) -> None:
@@ -779,3 +814,7 @@ def documents(self) -> AsyncDocumentsResourceWithStreamingResponse:
@cached_property
def contents(self) -> AsyncContentsResourceWithStreamingResponse:
return AsyncContentsResourceWithStreamingResponse(self._datastores.contents)
+
+ @cached_property
+ def chunks(self) -> AsyncChunksResourceWithStreamingResponse:
+ return AsyncChunksResourceWithStreamingResponse(self._datastores.chunks)
diff --git a/src/contextual/resources/datastores/documents.py b/src/contextual/resources/datastores/documents.py
index 98c2b70..14f8c97 100644
--- a/src/contextual/resources/datastores/documents.py
+++ b/src/contextual/resources/datastores/documents.py
@@ -28,6 +28,7 @@
)
from ...types.datastores.document_metadata import DocumentMetadata
from ...types.datastores.ingestion_response import IngestionResponse
+from ...types.datastores.document_delete_response import DocumentDeleteResponse
from ...types.datastores.document_get_parse_result_response import DocumentGetParseResultResponse
__all__ = ["DocumentsResource", "AsyncDocumentsResource"]
@@ -141,7 +142,7 @@ def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> DocumentDeleteResponse:
"""Delete a given document from its `Datastore`.
This operation is irreversible.
@@ -168,7 +169,7 @@ def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=DocumentDeleteResponse,
)
def get_parse_result(
@@ -534,7 +535,7 @@ async def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> DocumentDeleteResponse:
"""Delete a given document from its `Datastore`.
This operation is irreversible.
@@ -561,7 +562,7 @@ async def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=DocumentDeleteResponse,
)
async def get_parse_result(
diff --git a/src/contextual/types/__init__.py b/src/contextual/types/__init__.py
index 1dec368..e23e166 100644
--- a/src/contextual/types/__init__.py
+++ b/src/contextual/types/__init__.py
@@ -5,16 +5,21 @@
from . import datastores, agent_configs, agent_metadata, filter_and_rerank_config
from .. import _compat
from .agent import Agent as Agent
+from .shared import ParsedBlock as ParsedBlock
from .datastore import Datastore as Datastore
+from .acl_config import ACLConfig as ACLConfig
from .agent_configs import AgentConfigs as AgentConfigs
from .global_config import GlobalConfig as GlobalConfig
from .agent_metadata import AgentMetadata as AgentMetadata
from .new_user_param import NewUserParam as NewUserParam
+from .acl_config_param import ACLConfigParam as ACLConfigParam
from .retrieval_config import RetrievalConfig as RetrievalConfig
from .user_list_params import UserListParams as UserListParams
from .agent_list_params import AgentListParams as AgentListParams
from .parse_jobs_params import ParseJobsParams as ParseJobsParams
from .datastore_metadata import DatastoreMetadata as DatastoreMetadata
+from .html_configuration import HTMLConfiguration as HTMLConfiguration
+from .translation_config import TranslationConfig as TranslationConfig
from .user_invite_params import UserInviteParams as UserInviteParams
from .user_update_params import UserUpdateParams as UserUpdateParams
from .agent_configs_param import AgentConfigsParam as AgentConfigsParam
@@ -27,10 +32,14 @@
from .parse_jobs_response import ParseJobsResponse as ParseJobsResponse
from .list_agents_response import ListAgentsResponse as ListAgentsResponse
from .lmunit_create_params import LMUnitCreateParams as LMUnitCreateParams
+from .reformulation_config import ReformulationConfig as ReformulationConfig
from .rerank_create_params import RerankCreateParams as RerankCreateParams
+from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
+from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse
from .datastore_list_params import DatastoreListParams as DatastoreListParams
from .invite_users_response import InviteUsersResponse as InviteUsersResponse
from .parse_create_response import ParseCreateResponse as ParseCreateResponse
+from .chunking_configuration import ChunkingConfiguration as ChunkingConfiguration
from .generate_create_params import GenerateCreateParams as GenerateCreateParams
from .lmunit_create_response import LMUnitCreateResponse as LMUnitCreateResponse
from .rerank_create_response import RerankCreateResponse as RerankCreateResponse
@@ -39,17 +48,30 @@
from .agent_metadata_response import AgentMetadataResponse as AgentMetadataResponse
from .datastore_create_params import DatastoreCreateParams as DatastoreCreateParams
from .datastore_update_params import DatastoreUpdateParams as DatastoreUpdateParams
+from .datastore_reset_response import DatastoreResetResponse as DatastoreResetResponse
from .filter_and_rerank_config import FilterAndRerankConfig as FilterAndRerankConfig
from .generate_create_response import GenerateCreateResponse as GenerateCreateResponse
from .generate_response_config import GenerateResponseConfig as GenerateResponseConfig
+from .html_configuration_param import HTMLConfigurationParam as HTMLConfigurationParam
from .list_datastores_response import ListDatastoresResponse as ListDatastoresResponse
from .parse_job_results_params import ParseJobResultsParams as ParseJobResultsParams
+from .translation_config_param import TranslationConfigParam as TranslationConfigParam
from .create_datastore_response import CreateDatastoreResponse as CreateDatastoreResponse
+from .datastore_delete_response import DatastoreDeleteResponse as DatastoreDeleteResponse
from .datastore_update_response import DatastoreUpdateResponse as DatastoreUpdateResponse
from .parse_job_status_response import ParseJobStatusResponse as ParseJobStatusResponse
+from .agent_save_template_params import AgentSaveTemplateParams as AgentSaveTemplateParams
from .parse_job_results_response import ParseJobResultsResponse as ParseJobResultsResponse
+from .reformulation_config_param import ReformulationConfigParam as ReformulationConfigParam
+from .chunking_configuration_param import ChunkingConfigurationParam as ChunkingConfigurationParam
+from .datastore_parse_configuration import DatastoreParseConfiguration as DatastoreParseConfiguration
from .filter_and_rerank_config_param import FilterAndRerankConfigParam as FilterAndRerankConfigParam
from .generate_response_config_param import GenerateResponseConfigParam as GenerateResponseConfigParam
+from .datastore_parse_configuration_param import DatastoreParseConfigurationParam as DatastoreParseConfigurationParam
+from .unstructured_datastore_config_model import UnstructuredDatastoreConfigModel as UnstructuredDatastoreConfigModel
+from .unstructured_datastore_config_model_param import (
+ UnstructuredDatastoreConfigModelParam as UnstructuredDatastoreConfigModelParam,
+)
# Rebuild cyclical models only after all modules are imported.
# This ensures that, when building the deferred (due to cyclical references) model schema,
diff --git a/src/contextual/types/acl_config.py b/src/contextual/types/acl_config.py
new file mode 100644
index 0000000..93e09df
--- /dev/null
+++ b/src/contextual/types/acl_config.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["ACLConfig"]
+
+
+class ACLConfig(BaseModel):
+ """Captures ACL configurations for an Agent"""
+
+ acl_active: Optional[bool] = None
+ """Whether to enable ACL."""
+
+ acl_yaml: Optional[str] = None
+ """The YAML file to use for ACL."""
diff --git a/src/contextual/types/acl_config_param.py b/src/contextual/types/acl_config_param.py
new file mode 100644
index 0000000..d18d8b7
--- /dev/null
+++ b/src/contextual/types/acl_config_param.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ACLConfigParam"]
+
+
+class ACLConfigParam(TypedDict, total=False):
+ """Captures ACL configurations for an Agent"""
+
+ acl_active: bool
+ """Whether to enable ACL."""
+
+ acl_yaml: str
+ """The YAML file to use for ACL."""
diff --git a/src/contextual/types/agent_configs.py b/src/contextual/types/agent_configs.py
index 1b7f9d4..c88eaf5 100644
--- a/src/contextual/types/agent_configs.py
+++ b/src/contextual/types/agent_configs.py
@@ -5,44 +5,19 @@
from typing import Optional
from .._models import BaseModel
+from .acl_config import ACLConfig
from .global_config import GlobalConfig
from .retrieval_config import RetrievalConfig
+from .translation_config import TranslationConfig
+from .reformulation_config import ReformulationConfig
from .generate_response_config import GenerateResponseConfig
-__all__ = ["AgentConfigs", "ACLConfig", "ReformulationConfig", "TranslationConfig"]
-
-
-class ACLConfig(BaseModel):
- acl_active: Optional[bool] = None
- """Whether to enable ACL."""
-
- acl_yaml: Optional[str] = None
- """The YAML file to use for ACL."""
-
-
-class ReformulationConfig(BaseModel):
- enable_query_decomposition: Optional[bool] = None
- """Whether to enable query decomposition."""
-
- enable_query_expansion: Optional[bool] = None
- """Whether to enable query expansion."""
-
- query_decomposition_prompt: Optional[str] = None
- """The prompt to use for query decomposition."""
-
- query_expansion_prompt: Optional[str] = None
- """The prompt to use for query expansion."""
-
-
-class TranslationConfig(BaseModel):
- translate_confidence: Optional[float] = None
- """The confidence threshold for translation."""
-
- translate_needed: Optional[bool] = None
- """Whether to enable translation for the agent's responses."""
+__all__ = ["AgentConfigs"]
class AgentConfigs(BaseModel):
+ """Response to configs for different components"""
+
acl_config: Optional[ACLConfig] = None
"""Parameters that affect the agent's ACL workflow"""
diff --git a/src/contextual/types/agent_configs_param.py b/src/contextual/types/agent_configs_param.py
index 822e02a..3c8f84f 100644
--- a/src/contextual/types/agent_configs_param.py
+++ b/src/contextual/types/agent_configs_param.py
@@ -4,45 +4,20 @@
from typing_extensions import TypedDict
+from .acl_config_param import ACLConfigParam
from .global_config_param import GlobalConfigParam
from .retrieval_config_param import RetrievalConfigParam
+from .translation_config_param import TranslationConfigParam
+from .reformulation_config_param import ReformulationConfigParam
from .generate_response_config_param import GenerateResponseConfigParam
-__all__ = ["AgentConfigsParam", "ACLConfig", "ReformulationConfig", "TranslationConfig"]
-
-
-class ACLConfig(TypedDict, total=False):
- acl_active: bool
- """Whether to enable ACL."""
-
- acl_yaml: str
- """The YAML file to use for ACL."""
-
-
-class ReformulationConfig(TypedDict, total=False):
- enable_query_decomposition: bool
- """Whether to enable query decomposition."""
-
- enable_query_expansion: bool
- """Whether to enable query expansion."""
-
- query_decomposition_prompt: str
- """The prompt to use for query decomposition."""
-
- query_expansion_prompt: str
- """The prompt to use for query expansion."""
-
-
-class TranslationConfig(TypedDict, total=False):
- translate_confidence: float
- """The confidence threshold for translation."""
-
- translate_needed: bool
- """Whether to enable translation for the agent's responses."""
+__all__ = ["AgentConfigsParam"]
class AgentConfigsParam(TypedDict, total=False):
- acl_config: ACLConfig
+ """Response to configs for different components"""
+
+ acl_config: ACLConfigParam
"""Parameters that affect the agent's ACL workflow"""
filter_and_rerank_config: "FilterAndRerankConfigParam"
@@ -54,13 +29,13 @@ class AgentConfigsParam(TypedDict, total=False):
global_config: GlobalConfigParam
"""Parameters that affect the agent's overall RAG workflow"""
- reformulation_config: ReformulationConfig
+ reformulation_config: ReformulationConfigParam
"""Parameters that affect the agent's query reformulation"""
retrieval_config: RetrievalConfigParam
"""Parameters that affect how the agent retrieves from datastore(s)"""
- translation_config: TranslationConfig
+ translation_config: TranslationConfigParam
"""Parameters that affect the agent's translation workflow"""
diff --git a/src/contextual/types/agent_delete_response.py b/src/contextual/types/agent_delete_response.py
new file mode 100644
index 0000000..d3b3e49
--- /dev/null
+++ b/src/contextual/types/agent_delete_response.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["AgentDeleteResponse"]
+
+
+class AgentDeleteResponse(BaseModel):
+ pass
diff --git a/src/contextual/types/agent_metadata.py b/src/contextual/types/agent_metadata.py
index 6598f8e..eb04b8a 100644
--- a/src/contextual/types/agent_metadata.py
+++ b/src/contextual/types/agent_metadata.py
@@ -10,6 +10,8 @@
class AgentUsages(BaseModel):
+ """Total API request counts for the agent."""
+
eval: int
"""eval request count"""
@@ -21,6 +23,8 @@ class AgentUsages(BaseModel):
class AgentMetadata(BaseModel):
+ """Response to GET Agent request"""
+
datastore_ids: List[str]
"""The IDs of the datastore(s) associated with the agent"""
diff --git a/src/contextual/types/agent_metadata_response.py b/src/contextual/types/agent_metadata_response.py
index 0bb4734..4d5dc3b 100644
--- a/src/contextual/types/agent_metadata_response.py
+++ b/src/contextual/types/agent_metadata_response.py
@@ -11,6 +11,8 @@
class GetTwilightAgentResponseAgentUsages(BaseModel):
+ """Total API request counts for the agent."""
+
eval: int
"""eval request count"""
@@ -22,6 +24,8 @@ class GetTwilightAgentResponseAgentUsages(BaseModel):
class GetTwilightAgentResponse(BaseModel):
+ """Response to GET Agent request"""
+
datastore_ids: List[str]
"""The IDs of the datastore(s) associated with the agent"""
diff --git a/src/contextual/types/agent_save_template_params.py b/src/contextual/types/agent_save_template_params.py
new file mode 100644
index 0000000..9b63785
--- /dev/null
+++ b/src/contextual/types/agent_save_template_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["AgentSaveTemplateParams"]
+
+
+class AgentSaveTemplateParams(TypedDict, total=False):
+ name: Required[str]
+ """The name of the template"""
diff --git a/src/contextual/types/agent_update_response.py b/src/contextual/types/agent_update_response.py
new file mode 100644
index 0000000..90ada7a
--- /dev/null
+++ b/src/contextual/types/agent_update_response.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["AgentUpdateResponse"]
+
+
+class AgentUpdateResponse(BaseModel):
+ pass
diff --git a/src/contextual/types/agents/__init__.py b/src/contextual/types/agents/__init__.py
index 6f82403..3499504 100644
--- a/src/contextual/types/agents/__init__.py
+++ b/src/contextual/types/agents/__init__.py
@@ -7,6 +7,7 @@
from .query_metrics_params import QueryMetricsParams as QueryMetricsParams
from .query_feedback_params import QueryFeedbackParams as QueryFeedbackParams
from .query_metrics_response import QueryMetricsResponse as QueryMetricsResponse
+from .template_list_response import TemplateListResponse as TemplateListResponse
from .query_feedback_response import QueryFeedbackResponse as QueryFeedbackResponse
from .retrieval_info_response import RetrievalInfoResponse as RetrievalInfoResponse
from .query_retrieval_info_params import QueryRetrievalInfoParams as QueryRetrievalInfoParams
diff --git a/src/contextual/types/agents/query_create_params.py b/src/contextual/types/agents/query_create_params.py
index ed08da1..41d9064 100644
--- a/src/contextual/types/agents/query_create_params.py
+++ b/src/contextual/types/agents/query_create_params.py
@@ -102,6 +102,8 @@ class QueryCreateParams(TypedDict, total=False):
class Message(TypedDict, total=False):
+ """Message object for a message sent or received in a conversation"""
+
content: Required[str]
"""Content of the message"""
@@ -116,6 +118,10 @@ class Message(TypedDict, total=False):
class OverrideConfiguration(TypedDict, total=False):
+ """
+ This will modify select configuration parameters for the agent during the response generation.
+ """
+
enable_filter: bool
"""Override the filter_retrievals for the query.
@@ -184,6 +190,8 @@ class OverrideConfiguration(TypedDict, total=False):
class StructuredOutput(TypedDict, total=False):
+ """Custom output structure format."""
+
json_schema: Required[Dict[str, object]]
"""The output json structure."""
diff --git a/src/contextual/types/agents/query_feedback_response.py b/src/contextual/types/agents/query_feedback_response.py
index 303902b..d0d4070 100644
--- a/src/contextual/types/agents/query_feedback_response.py
+++ b/src/contextual/types/agents/query_feedback_response.py
@@ -6,5 +6,7 @@
class QueryFeedbackResponse(BaseModel):
+ """Response schema for feedback submission endpoint."""
+
feedback_id: str
"""ID of the submitted or updated feedback."""
diff --git a/src/contextual/types/agents/query_response.py b/src/contextual/types/agents/query_response.py
index 82e1b41..6b12050 100644
--- a/src/contextual/types/agents/query_response.py
+++ b/src/contextual/types/agents/query_response.py
@@ -19,6 +19,8 @@
class RetrievalContentCtxlMetadata(BaseModel):
+ """Default metadata from the retrieval"""
+
chunk_id: Optional[str] = None
"""Unique identifier for the chunk."""
@@ -80,6 +82,8 @@ class RetrievalContentCustomMetadataConfig(BaseModel):
class RetrievalContent(BaseModel):
+ """Retrieval content object typing for v0.1 API."""
+
content_id: str
"""Unique identifier of the retrieved content"""
@@ -146,6 +150,8 @@ class RetrievalContent(BaseModel):
class Attribution(BaseModel):
+    """Attribution for some claim made in a generated message."""
+
content_ids: List[str]
"""Content IDs of the sources for the attributed text"""
@@ -157,6 +163,8 @@ class Attribution(BaseModel):
class GroundednessScore(BaseModel):
+    """Groundedness scores in a generated message."""
+
end_idx: int
"""End index of the span in the generated message"""
@@ -168,6 +176,8 @@ class GroundednessScore(BaseModel):
class Message(BaseModel):
+ """Response to the query request"""
+
content: str
"""Content of the message"""
@@ -179,6 +189,8 @@ class Message(BaseModel):
class QueryResponse(BaseModel):
+ """Response body for POST /query"""
+
conversation_id: str
"""A unique identifier for the conversation.
diff --git a/src/contextual/types/agents/template_list_response.py b/src/contextual/types/agents/template_list_response.py
new file mode 100644
index 0000000..7911870
--- /dev/null
+++ b/src/contextual/types/agents/template_list_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["TemplateListResponse"]
+
+
+class TemplateListResponse(BaseModel):
+ templates: Optional[List[str]] = None
+ """List of available templates."""
diff --git a/src/contextual/types/chunking_configuration.py b/src/contextual/types/chunking_configuration.py
new file mode 100644
index 0000000..8217f82
--- /dev/null
+++ b/src/contextual/types/chunking_configuration.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ChunkingConfiguration"]
+
+
+class ChunkingConfiguration(BaseModel):
+ """Configuration for document chunking settings."""
+
+ chunking_mode: Optional[Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]] = None
+ """Chunking mode to use.
+
+ Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`,
+ `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or
+ below, additionally merging or splitting based on length constraints.
+ `hierarchy_heading` splits chunks at every heading in the document hierarchy,
+ additionally merging or splitting based on length constraints. `static_length`
+ creates chunks of a fixed length. `page_level` creates chunks that cannot run
+ over page boundaries.
+ """
+
+ enable_hierarchy_based_contextualization: Optional[bool] = None
+ """Whether to enable section-based contextualization for chunking"""
+
+ max_chunk_length_tokens: Optional[int] = None
+ """Target maximum length of text tokens chunks for chunking.
+
+ Chunk length may exceed this value in some edge cases.
+ """
+
+ min_chunk_length_tokens: Optional[int] = None
+ """Target minimum length of chunks in tokens.
+
+ Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length
+ may be shorter than this value in some edge cases. Ignored if `chunking_mode` is
+ `page_level`.
+ """
diff --git a/src/contextual/types/chunking_configuration_param.py b/src/contextual/types/chunking_configuration_param.py
new file mode 100644
index 0000000..957d7fc
--- /dev/null
+++ b/src/contextual/types/chunking_configuration_param.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ChunkingConfigurationParam"]
+
+
+class ChunkingConfigurationParam(TypedDict, total=False):
+ """Configuration for document chunking settings."""
+
+ chunking_mode: Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]
+ """Chunking mode to use.
+
+ Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`,
+ `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or
+ below, additionally merging or splitting based on length constraints.
+ `hierarchy_heading` splits chunks at every heading in the document hierarchy,
+ additionally merging or splitting based on length constraints. `static_length`
+ creates chunks of a fixed length. `page_level` creates chunks that cannot run
+ over page boundaries.
+ """
+
+ enable_hierarchy_based_contextualization: bool
+ """Whether to enable section-based contextualization for chunking"""
+
+ max_chunk_length_tokens: int
+ """Target maximum length of text tokens chunks for chunking.
+
+ Chunk length may exceed this value in some edge cases.
+ """
+
+ min_chunk_length_tokens: int
+ """Target minimum length of chunks in tokens.
+
+ Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length
+ may be shorter than this value in some edge cases. Ignored if `chunking_mode` is
+ `page_level`.
+ """
diff --git a/src/contextual/types/create_agent_output.py b/src/contextual/types/create_agent_output.py
index d845c54..789ca75 100644
--- a/src/contextual/types/create_agent_output.py
+++ b/src/contextual/types/create_agent_output.py
@@ -8,6 +8,8 @@
class CreateAgentOutput(BaseModel):
+ """Response to POST /agents request"""
+
id: str
"""ID of the agent"""
diff --git a/src/contextual/types/datastore.py b/src/contextual/types/datastore.py
index d440903..a7a8846 100644
--- a/src/contextual/types/datastore.py
+++ b/src/contextual/types/datastore.py
@@ -5,91 +5,14 @@
from typing_extensions import Literal
from .._models import BaseModel
+from .unstructured_datastore_config_model import UnstructuredDatastoreConfigModel
-__all__ = ["Datastore", "Configuration", "ConfigurationChunking", "ConfigurationHTMLConfig", "ConfigurationParsing"]
-
-
-class ConfigurationChunking(BaseModel):
- chunking_mode: Optional[Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]] = None
- """Chunking mode to use.
-
- Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`,
- `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or
- below, additionally merging or splitting based on length constraints.
- `hierarchy_heading` splits chunks at every heading in the document hierarchy,
- additionally merging or splitting based on length constraints. `static_length`
- creates chunks of a fixed length. `page_level` creates chunks that cannot run
- over page boundaries.
- """
-
- enable_hierarchy_based_contextualization: Optional[bool] = None
- """Whether to enable section-based contextualization for chunking"""
-
- max_chunk_length_tokens: Optional[int] = None
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
- min_chunk_length_tokens: Optional[int] = None
- """Target minimum length of chunks in tokens.
-
- Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length
- may be shorter than this value in some edge cases. Ignored if `chunking_mode` is
- `page_level`.
- """
-
-
-class ConfigurationHTMLConfig(BaseModel):
- max_chunk_length_tokens: Optional[int] = None
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
-
-class ConfigurationParsing(BaseModel):
- enable_split_tables: Optional[bool] = None
- """
- Whether to enable table splitting, which splits large tables into smaller tables
- with at most `max_split_table_cells` cells each. In each split table, the table
- headers are reproduced as the first row(s). This is useful for preserving
- context when tables are too large to fit into one chunk.
- """
-
- figure_caption_mode: Optional[Literal["default", "custom", "ignore"]] = None
- """Mode for figure captioning.
-
- Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure
- captioning. Set to `default` to use the default figure prompt, which generates a
- detailed caption for each figure. Set to `custom` to use a custom prompt.
- """
-
- figure_captioning_prompt: Optional[str] = None
- """Prompt to use for generating image captions.
-
- Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null.
- """
-
- max_split_table_cells: Optional[int] = None
- """Maximum number of cells for split tables.
-
- Ignored if `enable_split_tables` is False.
- """
-
-
-class Configuration(BaseModel):
- chunking: Optional[ConfigurationChunking] = None
- """Configuration for document chunking"""
-
- html_config: Optional[ConfigurationHTMLConfig] = None
- """Configuration for HTML Extraction"""
-
- parsing: Optional[ConfigurationParsing] = None
- """Configuration for document parsing"""
+__all__ = ["Datastore"]
class Datastore(BaseModel):
+ """Datastore output entry with additional fields for public API."""
+
id: str
"""ID of the datastore"""
@@ -102,5 +25,5 @@ class Datastore(BaseModel):
name: str
"""Name of the datastore"""
- configuration: Optional[Configuration] = None
+ configuration: Optional[UnstructuredDatastoreConfigModel] = None
"""Configuration of the datastore"""
diff --git a/src/contextual/types/datastore_create_params.py b/src/contextual/types/datastore_create_params.py
index 0ab8d94..3fd85e1 100644
--- a/src/contextual/types/datastore_create_params.py
+++ b/src/contextual/types/datastore_create_params.py
@@ -2,100 +2,16 @@
from __future__ import annotations
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Required, TypedDict
-__all__ = [
- "DatastoreCreateParams",
- "Configuration",
- "ConfigurationChunking",
- "ConfigurationHTMLConfig",
- "ConfigurationParsing",
-]
+from .unstructured_datastore_config_model_param import UnstructuredDatastoreConfigModelParam
+
+__all__ = ["DatastoreCreateParams"]
class DatastoreCreateParams(TypedDict, total=False):
name: Required[str]
"""Name of the datastore"""
- configuration: Configuration
+ configuration: UnstructuredDatastoreConfigModelParam
"""Configuration of the datastore. If not provided, default configuration is used."""
-
-
-class ConfigurationChunking(TypedDict, total=False):
- chunking_mode: Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]
- """Chunking mode to use.
-
- Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`,
- `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or
- below, additionally merging or splitting based on length constraints.
- `hierarchy_heading` splits chunks at every heading in the document hierarchy,
- additionally merging or splitting based on length constraints. `static_length`
- creates chunks of a fixed length. `page_level` creates chunks that cannot run
- over page boundaries.
- """
-
- enable_hierarchy_based_contextualization: bool
- """Whether to enable section-based contextualization for chunking"""
-
- max_chunk_length_tokens: int
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
- min_chunk_length_tokens: int
- """Target minimum length of chunks in tokens.
-
- Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length
- may be shorter than this value in some edge cases. Ignored if `chunking_mode` is
- `page_level`.
- """
-
-
-class ConfigurationHTMLConfig(TypedDict, total=False):
- max_chunk_length_tokens: int
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
-
-class ConfigurationParsing(TypedDict, total=False):
- enable_split_tables: bool
- """
- Whether to enable table splitting, which splits large tables into smaller tables
- with at most `max_split_table_cells` cells each. In each split table, the table
- headers are reproduced as the first row(s). This is useful for preserving
- context when tables are too large to fit into one chunk.
- """
-
- figure_caption_mode: Literal["default", "custom", "ignore"]
- """Mode for figure captioning.
-
- Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure
- captioning. Set to `default` to use the default figure prompt, which generates a
- detailed caption for each figure. Set to `custom` to use a custom prompt.
- """
-
- figure_captioning_prompt: str
- """Prompt to use for generating image captions.
-
- Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null.
- """
-
- max_split_table_cells: int
- """Maximum number of cells for split tables.
-
- Ignored if `enable_split_tables` is False.
- """
-
-
-class Configuration(TypedDict, total=False):
- chunking: ConfigurationChunking
- """Configuration for document chunking"""
-
- html_config: ConfigurationHTMLConfig
- """Configuration for HTML Extraction"""
-
- parsing: ConfigurationParsing
- """Configuration for document parsing"""
diff --git a/src/contextual/types/datastore_delete_response.py b/src/contextual/types/datastore_delete_response.py
new file mode 100644
index 0000000..d32cd47
--- /dev/null
+++ b/src/contextual/types/datastore_delete_response.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["DatastoreDeleteResponse"]
+
+
+class DatastoreDeleteResponse(BaseModel):
+ pass
diff --git a/src/contextual/types/datastore_metadata.py b/src/contextual/types/datastore_metadata.py
index f1a0035..e934405 100644
--- a/src/contextual/types/datastore_metadata.py
+++ b/src/contextual/types/datastore_metadata.py
@@ -5,98 +5,14 @@
from typing_extensions import Literal
from .._models import BaseModel
+from .unstructured_datastore_config_model import UnstructuredDatastoreConfigModel
-__all__ = [
- "DatastoreMetadata",
- "Configuration",
- "ConfigurationChunking",
- "ConfigurationHTMLConfig",
- "ConfigurationParsing",
- "DatastoreUsages",
-]
-
-
-class ConfigurationChunking(BaseModel):
- chunking_mode: Optional[Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]] = None
- """Chunking mode to use.
-
- Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`,
- `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or
- below, additionally merging or splitting based on length constraints.
- `hierarchy_heading` splits chunks at every heading in the document hierarchy,
- additionally merging or splitting based on length constraints. `static_length`
- creates chunks of a fixed length. `page_level` creates chunks that cannot run
- over page boundaries.
- """
-
- enable_hierarchy_based_contextualization: Optional[bool] = None
- """Whether to enable section-based contextualization for chunking"""
-
- max_chunk_length_tokens: Optional[int] = None
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
- min_chunk_length_tokens: Optional[int] = None
- """Target minimum length of chunks in tokens.
-
- Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length
- may be shorter than this value in some edge cases. Ignored if `chunking_mode` is
- `page_level`.
- """
-
-
-class ConfigurationHTMLConfig(BaseModel):
- max_chunk_length_tokens: Optional[int] = None
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
-
-class ConfigurationParsing(BaseModel):
- enable_split_tables: Optional[bool] = None
- """
- Whether to enable table splitting, which splits large tables into smaller tables
- with at most `max_split_table_cells` cells each. In each split table, the table
- headers are reproduced as the first row(s). This is useful for preserving
- context when tables are too large to fit into one chunk.
- """
-
- figure_caption_mode: Optional[Literal["default", "custom", "ignore"]] = None
- """Mode for figure captioning.
-
- Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure
- captioning. Set to `default` to use the default figure prompt, which generates a
- detailed caption for each figure. Set to `custom` to use a custom prompt.
- """
-
- figure_captioning_prompt: Optional[str] = None
- """Prompt to use for generating image captions.
-
- Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null.
- """
-
- max_split_table_cells: Optional[int] = None
- """Maximum number of cells for split tables.
-
- Ignored if `enable_split_tables` is False.
- """
-
-
-class Configuration(BaseModel):
- chunking: Optional[ConfigurationChunking] = None
- """Configuration for document chunking"""
-
- html_config: Optional[ConfigurationHTMLConfig] = None
- """Configuration for HTML Extraction"""
-
- parsing: Optional[ConfigurationParsing] = None
- """Configuration for document parsing"""
+__all__ = ["DatastoreMetadata", "DatastoreUsages"]
class DatastoreUsages(BaseModel):
+ """Datastore usage"""
+
size_gb: float
"""Actual size of the datastore in GB"""
@@ -111,7 +27,7 @@ class DatastoreMetadata(BaseModel):
name: str
"""Name of the datastore"""
- configuration: Optional[Configuration] = None
+ configuration: Optional[UnstructuredDatastoreConfigModel] = None
"""Configuration for unstructured datastores."""
datastore_type: Optional[Literal["UNSTRUCTURED"]] = None
diff --git a/src/contextual/types/datastore_parse_configuration.py b/src/contextual/types/datastore_parse_configuration.py
new file mode 100644
index 0000000..d8f8816
--- /dev/null
+++ b/src/contextual/types/datastore_parse_configuration.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["DatastoreParseConfiguration"]
+
+
+class DatastoreParseConfiguration(BaseModel):
+ """Configuration for data extraction settings from documents at datastore level.
+
+ Controls settings for document parsing. Includes those from `/parse` API along with some extra ingestion-only ones.
+ """
+
+ enable_split_tables: Optional[bool] = None
+ """
+ Whether to enable table splitting, which splits large tables into smaller tables
+ with at most `max_split_table_cells` cells each. In each split table, the table
+ headers are reproduced as the first row(s). This is useful for preserving
+ context when tables are too large to fit into one chunk.
+ """
+
+ figure_caption_mode: Optional[Literal["default", "custom", "ignore"]] = None
+ """Mode for figure captioning.
+
+ Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure
+ captioning. Set to `default` to use the default figure prompt, which generates a
+ detailed caption for each figure. Set to `custom` to use a custom prompt.
+ """
+
+ figure_captioning_prompt: Optional[str] = None
+ """Prompt to use for generating image captions.
+
+ Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null.
+ """
+
+ max_split_table_cells: Optional[int] = None
+ """Maximum number of cells for split tables.
+
+ Ignored if `enable_split_tables` is False.
+ """
diff --git a/src/contextual/types/datastore_parse_configuration_param.py b/src/contextual/types/datastore_parse_configuration_param.py
new file mode 100644
index 0000000..312e2e2
--- /dev/null
+++ b/src/contextual/types/datastore_parse_configuration_param.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["DatastoreParseConfigurationParam"]
+
+
+class DatastoreParseConfigurationParam(TypedDict, total=False):
+ """Configuration for data extraction settings from documents at datastore level.
+
+ Controls settings for document parsing. Includes those from `/parse` API along with some extra ingestion-only ones.
+ """
+
+ enable_split_tables: bool
+ """
+ Whether to enable table splitting, which splits large tables into smaller tables
+ with at most `max_split_table_cells` cells each. In each split table, the table
+ headers are reproduced as the first row(s). This is useful for preserving
+ context when tables are too large to fit into one chunk.
+ """
+
+ figure_caption_mode: Literal["default", "custom", "ignore"]
+ """Mode for figure captioning.
+
+ Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure
+ captioning. Set to `default` to use the default figure prompt, which generates a
+ detailed caption for each figure. Set to `custom` to use a custom prompt.
+ """
+
+ figure_captioning_prompt: str
+ """Prompt to use for generating image captions.
+
+ Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null.
+ """
+
+ max_split_table_cells: int
+ """Maximum number of cells for split tables.
+
+ Ignored if `enable_split_tables` is False.
+ """
diff --git a/src/contextual/types/datastore_reset_response.py b/src/contextual/types/datastore_reset_response.py
new file mode 100644
index 0000000..5e8acc7
--- /dev/null
+++ b/src/contextual/types/datastore_reset_response.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["DatastoreResetResponse"]
+
+
+class DatastoreResetResponse(BaseModel):
+ pass
diff --git a/src/contextual/types/datastore_update_params.py b/src/contextual/types/datastore_update_params.py
index 8a4e448..ae18f3a 100644
--- a/src/contextual/types/datastore_update_params.py
+++ b/src/contextual/types/datastore_update_params.py
@@ -2,19 +2,15 @@
from __future__ import annotations
-from typing_extensions import Literal, TypedDict
+from typing_extensions import TypedDict
-__all__ = [
- "DatastoreUpdateParams",
- "Configuration",
- "ConfigurationChunking",
- "ConfigurationHTMLConfig",
- "ConfigurationParsing",
-]
+from .unstructured_datastore_config_model_param import UnstructuredDatastoreConfigModelParam
+
+__all__ = ["DatastoreUpdateParams"]
class DatastoreUpdateParams(TypedDict, total=False):
- configuration: Configuration
+ configuration: UnstructuredDatastoreConfigModelParam
"""Configuration of the datastore.
If not provided, current configuration is retained.
@@ -22,83 +18,3 @@ class DatastoreUpdateParams(TypedDict, total=False):
name: str
"""Name of the datastore"""
-
-
-class ConfigurationChunking(TypedDict, total=False):
- chunking_mode: Literal["hierarchy_depth", "hierarchy_heading", "static_length", "page_level"]
- """Chunking mode to use.
-
- Options are: `hierarchy_depth`, `hierarchy_heading`, `static_length`,
- `page_level`. `hierarchy_depth` groups chunks of the same hierarchy level or
- below, additionally merging or splitting based on length constraints.
- `hierarchy_heading` splits chunks at every heading in the document hierarchy,
- additionally merging or splitting based on length constraints. `static_length`
- creates chunks of a fixed length. `page_level` creates chunks that cannot run
- over page boundaries.
- """
-
- enable_hierarchy_based_contextualization: bool
- """Whether to enable section-based contextualization for chunking"""
-
- max_chunk_length_tokens: int
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
- min_chunk_length_tokens: int
- """Target minimum length of chunks in tokens.
-
- Must be at least 384 tokens less than `max_chunk_length_tokens`. Chunk length
- may be shorter than this value in some edge cases. Ignored if `chunking_mode` is
- `page_level`.
- """
-
-
-class ConfigurationHTMLConfig(TypedDict, total=False):
- max_chunk_length_tokens: int
- """Target maximum length of text tokens chunks for chunking.
-
- Chunk length may exceed this value in some edge cases.
- """
-
-
-class ConfigurationParsing(TypedDict, total=False):
- enable_split_tables: bool
- """
- Whether to enable table splitting, which splits large tables into smaller tables
- with at most `max_split_table_cells` cells each. In each split table, the table
- headers are reproduced as the first row(s). This is useful for preserving
- context when tables are too large to fit into one chunk.
- """
-
- figure_caption_mode: Literal["default", "custom", "ignore"]
- """Mode for figure captioning.
-
- Options are `default`, `custom`, or `ignore`. Set to `ignore` to disable figure
- captioning. Set to `default` to use the default figure prompt, which generates a
- detailed caption for each figure. Set to `custom` to use a custom prompt.
- """
-
- figure_captioning_prompt: str
- """Prompt to use for generating image captions.
-
- Must be non-empty if `figure_caption_mode` is `custom`. Otherwise, must be null.
- """
-
- max_split_table_cells: int
- """Maximum number of cells for split tables.
-
- Ignored if `enable_split_tables` is False.
- """
-
-
-class Configuration(TypedDict, total=False):
- chunking: ConfigurationChunking
- """Configuration for document chunking"""
-
- html_config: ConfigurationHTMLConfig
- """Configuration for HTML Extraction"""
-
- parsing: ConfigurationParsing
- """Configuration for document parsing"""
diff --git a/src/contextual/types/datastores/__init__.py b/src/contextual/types/datastores/__init__.py
index 764a27d..0c8c117 100644
--- a/src/contextual/types/datastores/__init__.py
+++ b/src/contextual/types/datastores/__init__.py
@@ -11,10 +11,13 @@
from .document_ingest_params import DocumentIngestParams as DocumentIngestParams
from .content_metadata_params import ContentMetadataParams as ContentMetadataParams
from .list_documents_response import ListDocumentsResponse as ListDocumentsResponse
+from .document_delete_response import DocumentDeleteResponse as DocumentDeleteResponse
from .composite_metadata_filter import CompositeMetadataFilter as CompositeMetadataFilter
from .content_metadata_response import ContentMetadataResponse as ContentMetadataResponse
from .base_metadata_filter_param import BaseMetadataFilterParam as BaseMetadataFilterParam
+from .chunk_update_content_params import ChunkUpdateContentParams as ChunkUpdateContentParams
from .document_set_metadata_params import DocumentSetMetadataParams as DocumentSetMetadataParams
+from .chunk_update_content_response import ChunkUpdateContentResponse as ChunkUpdateContentResponse
from .composite_metadata_filter_param import CompositeMetadataFilterParam as CompositeMetadataFilterParam
from .document_get_parse_result_params import DocumentGetParseResultParams as DocumentGetParseResultParams
from .document_get_parse_result_response import DocumentGetParseResultResponse as DocumentGetParseResultResponse
diff --git a/src/contextual/types/datastores/base_metadata_filter.py b/src/contextual/types/datastores/base_metadata_filter.py
index 8843ee5..91f59bc 100644
--- a/src/contextual/types/datastores/base_metadata_filter.py
+++ b/src/contextual/types/datastores/base_metadata_filter.py
@@ -9,6 +9,15 @@
class BaseMetadataFilter(BaseModel):
+ """Defines a custom metadata filter.
+
+ The expected input is a dict which can have different operators, fields and values. For example:
+
+ {"field": "title", "operator": "startswith", "value": "hr-"}
+
+ Use **lowercase** for `value` when not using `equals` operator. For document_id and date_created the query is built using direct query without nesting.
+ """
+
field: str
"""Field name to search for in the metadata"""
diff --git a/src/contextual/types/datastores/base_metadata_filter_param.py b/src/contextual/types/datastores/base_metadata_filter_param.py
index 0006f7f..77a7c71 100644
--- a/src/contextual/types/datastores/base_metadata_filter_param.py
+++ b/src/contextual/types/datastores/base_metadata_filter_param.py
@@ -11,6 +11,15 @@
class BaseMetadataFilterParam(TypedDict, total=False):
+ """Defines a custom metadata filter.
+
+ The expected input is a dict which can have different operators, fields and values. For example:
+
+ {"field": "title", "operator": "startswith", "value": "hr-"}
+
+ Use **lowercase** for `value` when not using `equals` operator. For document_id and date_created the query is built using direct query without nesting.
+ """
+
field: Required[str]
"""Field name to search for in the metadata"""
diff --git a/src/contextual/types/datastores/chunk_update_content_params.py b/src/contextual/types/datastores/chunk_update_content_params.py
new file mode 100644
index 0000000..e30965a
--- /dev/null
+++ b/src/contextual/types/datastores/chunk_update_content_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ChunkUpdateContentParams"]
+
+
+class ChunkUpdateContentParams(TypedDict, total=False):
+ datastore_id: Required[str]
+ """Datastore ID of the datastore containing the chunk"""
+
+ content: Required[str]
+ """The new content text for the chunk"""
diff --git a/src/contextual/types/datastores/chunk_update_content_response.py b/src/contextual/types/datastores/chunk_update_content_response.py
new file mode 100644
index 0000000..c0560a9
--- /dev/null
+++ b/src/contextual/types/datastores/chunk_update_content_response.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["ChunkUpdateContentResponse"]
+
+
+class ChunkUpdateContentResponse(BaseModel):
+ pass
diff --git a/src/contextual/types/datastores/composite_metadata_filter.py b/src/contextual/types/datastores/composite_metadata_filter.py
index 049f45a..733c7b4 100644
--- a/src/contextual/types/datastores/composite_metadata_filter.py
+++ b/src/contextual/types/datastores/composite_metadata_filter.py
@@ -18,6 +18,11 @@
class CompositeMetadataFilter(BaseModel):
+ """\"Defines a custom metadata filter as a Composite MetadataFilter.
+
+ Which can be a list of filters or nested filters.
+ """
+
filters: List[Filter]
"""Filters added to the query for filtering docs"""
diff --git a/src/contextual/types/datastores/composite_metadata_filter_param.py b/src/contextual/types/datastores/composite_metadata_filter_param.py
index b5a11f8..0810e59 100644
--- a/src/contextual/types/datastores/composite_metadata_filter_param.py
+++ b/src/contextual/types/datastores/composite_metadata_filter_param.py
@@ -17,6 +17,11 @@
class CompositeMetadataFilterParam(TypedDict, total=False):
+ """\"Defines a custom metadata filter as a Composite MetadataFilter.
+
+ Which can be a list of filters or nested filters.
+ """
+
filters: Required[Iterable[Filter]]
"""Filters added to the query for filtering docs"""
diff --git a/src/contextual/types/datastores/content_list_response.py b/src/contextual/types/datastores/content_list_response.py
index 36766fd..343ef37 100644
--- a/src/contextual/types/datastores/content_list_response.py
+++ b/src/contextual/types/datastores/content_list_response.py
@@ -22,6 +22,8 @@ class DocumentContentEntry(BaseModel):
class StructuredContentEntry(BaseModel):
+ """Tabular content entry used in query retrieval."""
+
content_id: str
"""ID of the content"""
diff --git a/src/contextual/types/datastores/document_delete_response.py b/src/contextual/types/datastores/document_delete_response.py
new file mode 100644
index 0000000..328b1aa
--- /dev/null
+++ b/src/contextual/types/datastores/document_delete_response.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["DocumentDeleteResponse"]
+
+
+class DocumentDeleteResponse(BaseModel):
+ pass
diff --git a/src/contextual/types/datastores/document_get_parse_result_response.py b/src/contextual/types/datastores/document_get_parse_result_response.py
index 18e64c8..c930a8a 100644
--- a/src/contextual/types/datastores/document_get_parse_result_response.py
+++ b/src/contextual/types/datastores/document_get_parse_result_response.py
@@ -4,78 +4,17 @@
from typing_extensions import Literal
from ..._models import BaseModel
+from ..shared.parsed_block import ParsedBlock
-__all__ = [
- "DocumentGetParseResultResponse",
- "DocumentMetadata",
- "DocumentMetadataHierarchy",
- "DocumentMetadataHierarchyBlock",
- "DocumentMetadataHierarchyBlockBoundingBox",
- "Page",
- "PageBlock",
- "PageBlockBoundingBox",
-]
+__all__ = ["DocumentGetParseResultResponse", "DocumentMetadata", "DocumentMetadataHierarchy", "Page"]
-class DocumentMetadataHierarchyBlockBoundingBox(BaseModel):
- x0: float
- """The x-coordinate of the top-left corner of the bounding box"""
-
- x1: float
- """The x-coordinate of the bottom-right corner of the bounding box"""
-
- y0: float
- """The y-coordinate of the top-left corner of the bounding box"""
-
- y1: float
- """The y-coordinate of the bottom-right corner of the bounding box"""
-
-
-class DocumentMetadataHierarchyBlock(BaseModel):
- id: str
- """Unique ID of the block"""
-
- bounding_box: DocumentMetadataHierarchyBlockBoundingBox
- """
- The normalized bounding box of the block, as relative percentages of the page
- width and height
- """
-
- markdown: str
- """The Markdown representation of the block"""
-
- type: Literal["heading", "text", "table", "figure"]
- """The type of the block"""
-
- confidence_level: Optional[Literal["low", "medium", "high"]] = None
- """The confidence level of this block categorized as 'low', 'medium', or 'high'.
-
- Only available for blocks of type 'table' currently.
- """
-
- hierarchy_level: Optional[int] = None
- """
- The level of the block in the document hierarchy, starting at 0 for the
- root-level title block. Only present if `enable_document_hierarchy` was set to
- true in the request.
- """
-
- page_index: Optional[int] = None
- """The page (0-indexed) that this block belongs to.
-
- Only set for heading blocks that are returned in the table of contents.
- """
-
- parent_ids: Optional[List[str]] = None
+class DocumentMetadataHierarchy(BaseModel):
"""
- The IDs of the parent in the document hierarchy, sorted from root-level to
- bottom. For root-level heading blocks, this will be an empty list. Only present
- if `enable_document_hierarchy` was set to true in the request.
+ Hierarchy of the document, as both heading blocks and a markdown table of contents
"""
-
-class DocumentMetadataHierarchy(BaseModel):
- blocks: Optional[List[DocumentMetadataHierarchyBlock]] = None
+ blocks: Optional[List[ParsedBlock]] = None
"""Heading blocks which define the hierarchy of the document"""
table_of_contents: Optional[str] = None
@@ -83,6 +22,8 @@ class DocumentMetadataHierarchy(BaseModel):
class DocumentMetadata(BaseModel):
+ """Document-level metadata parsed from the document"""
+
hierarchy: Optional[DocumentMetadataHierarchy] = None
"""
Hierarchy of the document, as both heading blocks and a markdown table of
@@ -90,68 +31,13 @@ class DocumentMetadata(BaseModel):
"""
-class PageBlockBoundingBox(BaseModel):
- x0: float
- """The x-coordinate of the top-left corner of the bounding box"""
-
- x1: float
- """The x-coordinate of the bottom-right corner of the bounding box"""
-
- y0: float
- """The y-coordinate of the top-left corner of the bounding box"""
-
- y1: float
- """The y-coordinate of the bottom-right corner of the bounding box"""
-
-
-class PageBlock(BaseModel):
- id: str
- """Unique ID of the block"""
-
- bounding_box: PageBlockBoundingBox
- """
- The normalized bounding box of the block, as relative percentages of the page
- width and height
- """
-
- markdown: str
- """The Markdown representation of the block"""
-
- type: Literal["heading", "text", "table", "figure"]
- """The type of the block"""
-
- confidence_level: Optional[Literal["low", "medium", "high"]] = None
- """The confidence level of this block categorized as 'low', 'medium', or 'high'.
-
- Only available for blocks of type 'table' currently.
- """
-
- hierarchy_level: Optional[int] = None
- """
- The level of the block in the document hierarchy, starting at 0 for the
- root-level title block. Only present if `enable_document_hierarchy` was set to
- true in the request.
- """
-
- page_index: Optional[int] = None
- """The page (0-indexed) that this block belongs to.
-
- Only set for heading blocks that are returned in the table of contents.
- """
-
- parent_ids: Optional[List[str]] = None
- """
- The IDs of the parent in the document hierarchy, sorted from root-level to
- bottom. For root-level heading blocks, this will be an empty list. Only present
- if `enable_document_hierarchy` was set to true in the request.
- """
-
-
class Page(BaseModel):
+ """Per-page parse results."""
+
index: int
"""The index of the parsed page (zero-indexed)"""
- blocks: Optional[List[PageBlock]] = None
+ blocks: Optional[List[ParsedBlock]] = None
"""The parsed, structured blocks of this page.
Present if `blocks-per-page` was among the requested output types.
@@ -165,6 +51,8 @@ class Page(BaseModel):
class DocumentGetParseResultResponse(BaseModel):
+ """/parse results reponse object."""
+
file_name: str
"""The name of the file that was uploaded for parsing"""
diff --git a/src/contextual/types/datastores/document_metadata.py b/src/contextual/types/datastores/document_metadata.py
index c1bdd5a..69b6bec 100644
--- a/src/contextual/types/datastores/document_metadata.py
+++ b/src/contextual/types/datastores/document_metadata.py
@@ -26,6 +26,8 @@ class CustomMetadataConfig(BaseModel):
class DocumentMetadata(BaseModel):
+ """Document description"""
+
id: str
"""ID of the document that was ingested"""
diff --git a/src/contextual/types/datastores/ingestion_response.py b/src/contextual/types/datastores/ingestion_response.py
index cd8558f..e1bd832 100644
--- a/src/contextual/types/datastores/ingestion_response.py
+++ b/src/contextual/types/datastores/ingestion_response.py
@@ -6,5 +6,7 @@
class IngestionResponse(BaseModel):
+ """Response body from POST /data/documents"""
+
id: str
"""ID of the document being ingested"""
diff --git a/src/contextual/types/datastores/list_documents_response.py b/src/contextual/types/datastores/list_documents_response.py
index 8f0be4a..064a061 100644
--- a/src/contextual/types/datastores/list_documents_response.py
+++ b/src/contextual/types/datastores/list_documents_response.py
@@ -9,6 +9,8 @@
class ListDocumentsResponse(BaseModel):
+ """Response body from GET /data/documents"""
+
documents: List[DocumentMetadata]
"""List of documents retrieved based on the user's GET request"""
diff --git a/src/contextual/types/filter_and_rerank_config.py b/src/contextual/types/filter_and_rerank_config.py
index 5057861..9c293f8 100644
--- a/src/contextual/types/filter_and_rerank_config.py
+++ b/src/contextual/types/filter_and_rerank_config.py
@@ -14,6 +14,8 @@
class FilterAndRerankConfig(BaseModel):
+ """Captures Filter and Rerank configurations for an Agent"""
+
default_metadata_filters: Optional[DefaultMetadataFilters] = None
"""
Optional metadata filter which is applied while retrieving from every datastore
diff --git a/src/contextual/types/filter_and_rerank_config_param.py b/src/contextual/types/filter_and_rerank_config_param.py
index 415293e..ff43d8f 100644
--- a/src/contextual/types/filter_and_rerank_config_param.py
+++ b/src/contextual/types/filter_and_rerank_config_param.py
@@ -13,6 +13,8 @@
class FilterAndRerankConfigParam(TypedDict, total=False):
+ """Captures Filter and Rerank configurations for an Agent"""
+
default_metadata_filters: DefaultMetadataFilters
"""
Optional metadata filter which is applied while retrieving from every datastore
diff --git a/src/contextual/types/generate_create_params.py b/src/contextual/types/generate_create_params.py
index 662a561..1233eec 100644
--- a/src/contextual/types/generate_create_params.py
+++ b/src/contextual/types/generate_create_params.py
@@ -56,6 +56,8 @@ class GenerateCreateParams(TypedDict, total=False):
class Message(TypedDict, total=False):
+ """Message object for a message received in the /generate request"""
+
content: Required[str]
"""Content of the message"""
diff --git a/src/contextual/types/generate_create_response.py b/src/contextual/types/generate_create_response.py
index 25b08a7..46d8dd7 100644
--- a/src/contextual/types/generate_create_response.py
+++ b/src/contextual/types/generate_create_response.py
@@ -6,5 +6,7 @@
class GenerateCreateResponse(BaseModel):
+ """/generate result object."""
+
response: str
"""The model's response to the last user message."""
diff --git a/src/contextual/types/generate_response_config.py b/src/contextual/types/generate_response_config.py
index 4635750..62fc3a8 100644
--- a/src/contextual/types/generate_response_config.py
+++ b/src/contextual/types/generate_response_config.py
@@ -8,6 +8,8 @@
class GenerateResponseConfig(BaseModel):
+ """Captures advance LLM configurations for an Agent"""
+
avoid_commentary: Optional[bool] = None
"""
Flag to indicate whether the model should avoid providing additional commentary
diff --git a/src/contextual/types/generate_response_config_param.py b/src/contextual/types/generate_response_config_param.py
index ea39181..0ed9965 100644
--- a/src/contextual/types/generate_response_config_param.py
+++ b/src/contextual/types/generate_response_config_param.py
@@ -8,6 +8,8 @@
class GenerateResponseConfigParam(TypedDict, total=False):
+ """Captures advance LLM configurations for an Agent"""
+
avoid_commentary: bool
"""
Flag to indicate whether the model should avoid providing additional commentary
diff --git a/src/contextual/types/global_config.py b/src/contextual/types/global_config.py
index 716edde..1252bec 100644
--- a/src/contextual/types/global_config.py
+++ b/src/contextual/types/global_config.py
@@ -8,6 +8,8 @@
class GlobalConfig(BaseModel):
+ """Captures global configs"""
+
enable_filter: Optional[bool] = None
"""Enables filtering of retrieved chunks with a separate LLM"""
diff --git a/src/contextual/types/global_config_param.py b/src/contextual/types/global_config_param.py
index e09fa3a..625b81d 100644
--- a/src/contextual/types/global_config_param.py
+++ b/src/contextual/types/global_config_param.py
@@ -8,6 +8,8 @@
class GlobalConfigParam(TypedDict, total=False):
+ """Captures global configs"""
+
enable_filter: bool
"""Enables filtering of retrieved chunks with a separate LLM"""
diff --git a/src/contextual/types/html_configuration.py b/src/contextual/types/html_configuration.py
new file mode 100644
index 0000000..dfcfada
--- /dev/null
+++ b/src/contextual/types/html_configuration.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["HTMLConfiguration"]
+
+
+class HTMLConfiguration(BaseModel):
+ """Configuration for HTML document ingestion settings."""
+
+ max_chunk_length_tokens: Optional[int] = None
+ """Target maximum length of text tokens chunks for chunking.
+
+ Chunk length may exceed this value in some edge cases.
+ """
diff --git a/src/contextual/types/html_configuration_param.py b/src/contextual/types/html_configuration_param.py
new file mode 100644
index 0000000..4dd6cea
--- /dev/null
+++ b/src/contextual/types/html_configuration_param.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["HTMLConfigurationParam"]
+
+
+class HTMLConfigurationParam(TypedDict, total=False):
+ """Configuration for HTML document ingestion settings."""
+
+ max_chunk_length_tokens: int
+ """Target maximum length of text tokens chunks for chunking.
+
+ Chunk length may exceed this value in some edge cases.
+ """
diff --git a/src/contextual/types/list_users_response.py b/src/contextual/types/list_users_response.py
index 2cebb63..861511a 100644
--- a/src/contextual/types/list_users_response.py
+++ b/src/contextual/types/list_users_response.py
@@ -9,6 +9,8 @@
class UserPerAgentRole(BaseModel):
+ """The schema used to capture agent level roles"""
+
agent_id: str
"""ID of the agent on which to grant/revoke the role."""
@@ -20,6 +22,8 @@ class UserPerAgentRole(BaseModel):
class User(BaseModel):
+ """The schema used for listing existing (activated / deactivated) users."""
+
id: str
email: str
diff --git a/src/contextual/types/lmunit_create_response.py b/src/contextual/types/lmunit_create_response.py
index 37c1d6b..6030d12 100644
--- a/src/contextual/types/lmunit_create_response.py
+++ b/src/contextual/types/lmunit_create_response.py
@@ -6,6 +6,8 @@
class LMUnitCreateResponse(BaseModel):
+ """LMUnit result object."""
+
score: float
"""The response is scored on a continuous scale from 1 to 5 on the unit test.
diff --git a/src/contextual/types/new_user_param.py b/src/contextual/types/new_user_param.py
index 027d759..ae8f210 100644
--- a/src/contextual/types/new_user_param.py
+++ b/src/contextual/types/new_user_param.py
@@ -9,6 +9,8 @@
class PerAgentRole(TypedDict, total=False):
+ """The schema used to capture agent level roles"""
+
agent_id: Required[str]
"""ID of the agent on which to grant/revoke the role."""
@@ -20,6 +22,8 @@ class PerAgentRole(TypedDict, total=False):
class NewUserParam(TypedDict, total=False):
+ """The schema used for creating new users or updating existing users."""
+
email: Required[str]
"""The email of the user"""
diff --git a/src/contextual/types/parse_create_response.py b/src/contextual/types/parse_create_response.py
index 168854b..d255e0c 100644
--- a/src/contextual/types/parse_create_response.py
+++ b/src/contextual/types/parse_create_response.py
@@ -6,5 +6,7 @@
class ParseCreateResponse(BaseModel):
+ """/parse response object."""
+
job_id: str
"""Unique ID of the parse job"""
diff --git a/src/contextual/types/parse_job_results_response.py b/src/contextual/types/parse_job_results_response.py
index 8d1565e..c7b609c 100644
--- a/src/contextual/types/parse_job_results_response.py
+++ b/src/contextual/types/parse_job_results_response.py
@@ -4,78 +4,17 @@
from typing_extensions import Literal
from .._models import BaseModel
+from .shared.parsed_block import ParsedBlock
-__all__ = [
- "ParseJobResultsResponse",
- "DocumentMetadata",
- "DocumentMetadataHierarchy",
- "DocumentMetadataHierarchyBlock",
- "DocumentMetadataHierarchyBlockBoundingBox",
- "Page",
- "PageBlock",
- "PageBlockBoundingBox",
-]
+__all__ = ["ParseJobResultsResponse", "DocumentMetadata", "DocumentMetadataHierarchy", "Page"]
-class DocumentMetadataHierarchyBlockBoundingBox(BaseModel):
- x0: float
- """The x-coordinate of the top-left corner of the bounding box"""
-
- x1: float
- """The x-coordinate of the bottom-right corner of the bounding box"""
-
- y0: float
- """The y-coordinate of the top-left corner of the bounding box"""
-
- y1: float
- """The y-coordinate of the bottom-right corner of the bounding box"""
-
-
-class DocumentMetadataHierarchyBlock(BaseModel):
- id: str
- """Unique ID of the block"""
-
- bounding_box: DocumentMetadataHierarchyBlockBoundingBox
- """
- The normalized bounding box of the block, as relative percentages of the page
- width and height
- """
-
- markdown: str
- """The Markdown representation of the block"""
-
- type: Literal["heading", "text", "table", "figure"]
- """The type of the block"""
-
- confidence_level: Optional[Literal["low", "medium", "high"]] = None
- """The confidence level of this block categorized as 'low', 'medium', or 'high'.
-
- Only available for blocks of type 'table' currently.
- """
-
- hierarchy_level: Optional[int] = None
- """
- The level of the block in the document hierarchy, starting at 0 for the
- root-level title block. Only present if `enable_document_hierarchy` was set to
- true in the request.
- """
-
- page_index: Optional[int] = None
- """The page (0-indexed) that this block belongs to.
-
- Only set for heading blocks that are returned in the table of contents.
- """
-
- parent_ids: Optional[List[str]] = None
+class DocumentMetadataHierarchy(BaseModel):
"""
- The IDs of the parent in the document hierarchy, sorted from root-level to
- bottom. For root-level heading blocks, this will be an empty list. Only present
- if `enable_document_hierarchy` was set to true in the request.
+ Hierarchy of the document, as both heading blocks and a markdown table of contents
"""
-
-class DocumentMetadataHierarchy(BaseModel):
- blocks: Optional[List[DocumentMetadataHierarchyBlock]] = None
+ blocks: Optional[List[ParsedBlock]] = None
"""Heading blocks which define the hierarchy of the document"""
table_of_contents: Optional[str] = None
@@ -83,6 +22,8 @@ class DocumentMetadataHierarchy(BaseModel):
class DocumentMetadata(BaseModel):
+ """Document-level metadata parsed from the document"""
+
hierarchy: Optional[DocumentMetadataHierarchy] = None
"""
Hierarchy of the document, as both heading blocks and a markdown table of
@@ -90,68 +31,13 @@ class DocumentMetadata(BaseModel):
"""
-class PageBlockBoundingBox(BaseModel):
- x0: float
- """The x-coordinate of the top-left corner of the bounding box"""
-
- x1: float
- """The x-coordinate of the bottom-right corner of the bounding box"""
-
- y0: float
- """The y-coordinate of the top-left corner of the bounding box"""
-
- y1: float
- """The y-coordinate of the bottom-right corner of the bounding box"""
-
-
-class PageBlock(BaseModel):
- id: str
- """Unique ID of the block"""
-
- bounding_box: PageBlockBoundingBox
- """
- The normalized bounding box of the block, as relative percentages of the page
- width and height
- """
-
- markdown: str
- """The Markdown representation of the block"""
-
- type: Literal["heading", "text", "table", "figure"]
- """The type of the block"""
-
- confidence_level: Optional[Literal["low", "medium", "high"]] = None
- """The confidence level of this block categorized as 'low', 'medium', or 'high'.
-
- Only available for blocks of type 'table' currently.
- """
-
- hierarchy_level: Optional[int] = None
- """
- The level of the block in the document hierarchy, starting at 0 for the
- root-level title block. Only present if `enable_document_hierarchy` was set to
- true in the request.
- """
-
- page_index: Optional[int] = None
- """The page (0-indexed) that this block belongs to.
-
- Only set for heading blocks that are returned in the table of contents.
- """
-
- parent_ids: Optional[List[str]] = None
- """
- The IDs of the parent in the document hierarchy, sorted from root-level to
- bottom. For root-level heading blocks, this will be an empty list. Only present
- if `enable_document_hierarchy` was set to true in the request.
- """
-
-
class Page(BaseModel):
+ """Per-page parse results."""
+
index: int
"""The index of the parsed page (zero-indexed)"""
- blocks: Optional[List[PageBlock]] = None
+ blocks: Optional[List[ParsedBlock]] = None
"""The parsed, structured blocks of this page.
Present if `blocks-per-page` was among the requested output types.
@@ -165,6 +51,8 @@ class Page(BaseModel):
class ParseJobResultsResponse(BaseModel):
+ """/parse results reponse object."""
+
file_name: str
"""The name of the file that was uploaded for parsing"""
diff --git a/src/contextual/types/parse_job_status_response.py b/src/contextual/types/parse_job_status_response.py
index 768ccfd..18333cb 100644
--- a/src/contextual/types/parse_job_status_response.py
+++ b/src/contextual/types/parse_job_status_response.py
@@ -8,6 +8,8 @@
class ParseJobStatusResponse(BaseModel):
+ """/parse status reponse object."""
+
file_name: str
"""The name of the file that was uploaded for parsing"""
diff --git a/src/contextual/types/parse_jobs_response.py b/src/contextual/types/parse_jobs_response.py
index d511f7f..42acdbd 100644
--- a/src/contextual/types/parse_jobs_response.py
+++ b/src/contextual/types/parse_jobs_response.py
@@ -20,6 +20,8 @@ class Job(BaseModel):
class ParseJobsResponse(BaseModel):
+ """/parse list jobs object."""
+
jobs: List[Job]
"""List of parse jobs"""
diff --git a/src/contextual/types/reformulation_config.py b/src/contextual/types/reformulation_config.py
new file mode 100644
index 0000000..bfac831
--- /dev/null
+++ b/src/contextual/types/reformulation_config.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["ReformulationConfig"]
+
+
+class ReformulationConfig(BaseModel):
+ """Captures Query Reformulation configurations for an Agent"""
+
+ enable_query_decomposition: Optional[bool] = None
+ """Whether to enable query decomposition."""
+
+ enable_query_expansion: Optional[bool] = None
+ """Whether to enable query expansion."""
+
+ query_decomposition_prompt: Optional[str] = None
+ """The prompt to use for query decomposition."""
+
+ query_expansion_prompt: Optional[str] = None
+ """The prompt to use for query expansion."""
diff --git a/src/contextual/types/reformulation_config_param.py b/src/contextual/types/reformulation_config_param.py
new file mode 100644
index 0000000..37e043b
--- /dev/null
+++ b/src/contextual/types/reformulation_config_param.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ReformulationConfigParam"]
+
+
+class ReformulationConfigParam(TypedDict, total=False):
+ """Captures Query Reformulation configurations for an Agent"""
+
+ enable_query_decomposition: bool
+ """Whether to enable query decomposition."""
+
+ enable_query_expansion: bool
+ """Whether to enable query expansion."""
+
+ query_decomposition_prompt: str
+ """The prompt to use for query decomposition."""
+
+ query_expansion_prompt: str
+ """The prompt to use for query expansion."""
diff --git a/src/contextual/types/rerank_create_response.py b/src/contextual/types/rerank_create_response.py
index 091d3c7..1fbf4f5 100644
--- a/src/contextual/types/rerank_create_response.py
+++ b/src/contextual/types/rerank_create_response.py
@@ -8,6 +8,8 @@
class Result(BaseModel):
+ """Reranked result object."""
+
index: int
"""Index of the document in the input list, starting with 0"""
@@ -23,6 +25,8 @@ class Result(BaseModel):
class RerankCreateResponse(BaseModel):
+ """Rerank output response."""
+
results: List[Result]
"""
The ranked list of documents containing the index of the document and the
diff --git a/src/contextual/types/retrieval_config.py b/src/contextual/types/retrieval_config.py
index 663748f..9741455 100644
--- a/src/contextual/types/retrieval_config.py
+++ b/src/contextual/types/retrieval_config.py
@@ -8,6 +8,8 @@
class RetrievalConfig(BaseModel):
+ """Captures Retrieval configurations for an Agent"""
+
lexical_alpha: Optional[float] = None
"""The weight of lexical search during retrieval.
diff --git a/src/contextual/types/retrieval_config_param.py b/src/contextual/types/retrieval_config_param.py
index 9fa0b20..149b6b7 100644
--- a/src/contextual/types/retrieval_config_param.py
+++ b/src/contextual/types/retrieval_config_param.py
@@ -8,6 +8,8 @@
class RetrievalConfigParam(TypedDict, total=False):
+ """Captures Retrieval configurations for an Agent"""
+
lexical_alpha: float
"""The weight of lexical search during retrieval.
diff --git a/src/contextual/types/shared/__init__.py b/src/contextual/types/shared/__init__.py
new file mode 100644
index 0000000..8cabe35
--- /dev/null
+++ b/src/contextual/types/shared/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .parsed_block import ParsedBlock as ParsedBlock
diff --git a/src/contextual/types/shared/parsed_block.py b/src/contextual/types/shared/parsed_block.py
new file mode 100644
index 0000000..2307d65
--- /dev/null
+++ b/src/contextual/types/shared/parsed_block.py
@@ -0,0 +1,71 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ParsedBlock", "BoundingBox"]
+
+
+class BoundingBox(BaseModel):
+ """
+ The normalized bounding box of the block, as relative percentages of the page width and height
+ """
+
+ x0: float
+ """The x-coordinate of the top-left corner of the bounding box"""
+
+ x1: float
+ """The x-coordinate of the bottom-right corner of the bounding box"""
+
+ y0: float
+ """The y-coordinate of the top-left corner of the bounding box"""
+
+ y1: float
+ """The y-coordinate of the bottom-right corner of the bounding box"""
+
+
+class ParsedBlock(BaseModel):
+ """One logical block of content from a parsed page."""
+
+ id: str
+ """Unique ID of the block"""
+
+ bounding_box: BoundingBox
+ """
+ The normalized bounding box of the block, as relative percentages of the page
+ width and height
+ """
+
+ markdown: str
+ """The Markdown representation of the block"""
+
+ type: Literal["heading", "text", "table", "figure"]
+ """The type of the block"""
+
+ confidence_level: Optional[Literal["low", "medium", "high"]] = None
+ """The confidence level of this block categorized as 'low', 'medium', or 'high'.
+
+ Only available for blocks of type 'table' currently.
+ """
+
+ hierarchy_level: Optional[int] = None
+ """
+ The level of the block in the document hierarchy, starting at 0 for the
+ root-level title block. Only present if `enable_document_hierarchy` was set to
+ true in the request.
+ """
+
+ page_index: Optional[int] = None
+ """The page (0-indexed) that this block belongs to.
+
+ Only set for heading blocks that are returned in the table of contents.
+ """
+
+ parent_ids: Optional[List[str]] = None
+ """
+ The IDs of the parent in the document hierarchy, sorted from root-level to
+ bottom. For root-level heading blocks, this will be an empty list. Only present
+ if `enable_document_hierarchy` was set to true in the request.
+ """
diff --git a/src/contextual/types/translation_config.py b/src/contextual/types/translation_config.py
new file mode 100644
index 0000000..968cd5a
--- /dev/null
+++ b/src/contextual/types/translation_config.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["TranslationConfig"]
+
+
+class TranslationConfig(BaseModel):
+ """Captures Translation configurations for an Agent"""
+
+ translate_confidence: Optional[float] = None
+ """The confidence threshold for translation."""
+
+ translate_needed: Optional[bool] = None
+ """Whether to enable translation for the agent's responses."""
diff --git a/src/contextual/types/translation_config_param.py b/src/contextual/types/translation_config_param.py
new file mode 100644
index 0000000..f7e9b0f
--- /dev/null
+++ b/src/contextual/types/translation_config_param.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["TranslationConfigParam"]
+
+
+class TranslationConfigParam(TypedDict, total=False):
+ """Captures Translation configurations for an Agent"""
+
+ translate_confidence: float
+ """The confidence threshold for translation."""
+
+ translate_needed: bool
+ """Whether to enable translation for the agent's responses."""
diff --git a/src/contextual/types/unstructured_datastore_config_model.py b/src/contextual/types/unstructured_datastore_config_model.py
new file mode 100644
index 0000000..e743f16
--- /dev/null
+++ b/src/contextual/types/unstructured_datastore_config_model.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .html_configuration import HTMLConfiguration
+from .chunking_configuration import ChunkingConfiguration
+from .datastore_parse_configuration import DatastoreParseConfiguration
+
+__all__ = ["UnstructuredDatastoreConfigModel"]
+
+
+class UnstructuredDatastoreConfigModel(BaseModel):
+ """Configuration for unstructured datastores."""
+
+ chunking: Optional[ChunkingConfiguration] = None
+ """Configuration for document chunking"""
+
+ html_config: Optional[HTMLConfiguration] = None
+ """Configuration for HTML Extraction"""
+
+ parsing: Optional[DatastoreParseConfiguration] = None
+ """Configuration for document parsing"""
diff --git a/src/contextual/types/unstructured_datastore_config_model_param.py b/src/contextual/types/unstructured_datastore_config_model_param.py
new file mode 100644
index 0000000..658bd8b
--- /dev/null
+++ b/src/contextual/types/unstructured_datastore_config_model_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .html_configuration_param import HTMLConfigurationParam
+from .chunking_configuration_param import ChunkingConfigurationParam
+from .datastore_parse_configuration_param import DatastoreParseConfigurationParam
+
+__all__ = ["UnstructuredDatastoreConfigModelParam"]
+
+
+class UnstructuredDatastoreConfigModelParam(TypedDict, total=False):
+ """Configuration for unstructured datastores."""
+
+ chunking: ChunkingConfigurationParam
+ """Configuration for document chunking"""
+
+ html_config: HTMLConfigurationParam
+ """Configuration for HTML Extraction"""
+
+ parsing: DatastoreParseConfigurationParam
+ """Configuration for document parsing"""
diff --git a/src/contextual/types/user_update_params.py b/src/contextual/types/user_update_params.py
index 229d81d..ed78a06 100644
--- a/src/contextual/types/user_update_params.py
+++ b/src/contextual/types/user_update_params.py
@@ -46,6 +46,8 @@ class UserUpdateParams(TypedDict, total=False):
class PerAgentRole(TypedDict, total=False):
+ """The schema used to capture agent level roles"""
+
agent_id: Required[str]
"""ID of the agent on which to grant/revoke the role."""
diff --git a/tests/api_resources/agents/test_templates.py b/tests/api_resources/agents/test_templates.py
new file mode 100644
index 0000000..0d02531
--- /dev/null
+++ b/tests/api_resources/agents/test_templates.py
@@ -0,0 +1,151 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from contextual import ContextualAI, AsyncContextualAI
+from tests.utils import assert_matches_type
+from contextual.types import AgentMetadata
+from contextual.types.agents import TemplateListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestTemplates:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: ContextualAI) -> None:
+ template = client.agents.templates.retrieve(
+ "template",
+ )
+ assert_matches_type(AgentMetadata, template, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: ContextualAI) -> None:
+ response = client.agents.templates.with_raw_response.retrieve(
+ "template",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ template = response.parse()
+ assert_matches_type(AgentMetadata, template, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: ContextualAI) -> None:
+ with client.agents.templates.with_streaming_response.retrieve(
+ "template",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ template = response.parse()
+ assert_matches_type(AgentMetadata, template, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `template` but received ''"):
+ client.agents.templates.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: ContextualAI) -> None:
+ template = client.agents.templates.list()
+ assert_matches_type(TemplateListResponse, template, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: ContextualAI) -> None:
+ response = client.agents.templates.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ template = response.parse()
+ assert_matches_type(TemplateListResponse, template, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: ContextualAI) -> None:
+ with client.agents.templates.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ template = response.parse()
+ assert_matches_type(TemplateListResponse, template, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncTemplates:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncContextualAI) -> None:
+ template = await async_client.agents.templates.retrieve(
+ "template",
+ )
+ assert_matches_type(AgentMetadata, template, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.templates.with_raw_response.retrieve(
+ "template",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ template = await response.parse()
+ assert_matches_type(AgentMetadata, template, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.templates.with_streaming_response.retrieve(
+ "template",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ template = await response.parse()
+ assert_matches_type(AgentMetadata, template, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `template` but received ''"):
+ await async_client.agents.templates.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncContextualAI) -> None:
+ template = await async_client.agents.templates.list()
+ assert_matches_type(TemplateListResponse, template, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.templates.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ template = await response.parse()
+ assert_matches_type(TemplateListResponse, template, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.templates.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ template = await response.parse()
+ assert_matches_type(TemplateListResponse, template, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/datastores/test_chunks.py b/tests/api_resources/datastores/test_chunks.py
new file mode 100644
index 0000000..d2aca22
--- /dev/null
+++ b/tests/api_resources/datastores/test_chunks.py
@@ -0,0 +1,130 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from contextual import ContextualAI, AsyncContextualAI
+from tests.utils import assert_matches_type
+from contextual.types.datastores import ChunkUpdateContentResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestChunks:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_update_content(self, client: ContextualAI) -> None:
+ chunk = client.datastores.chunks.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ )
+ assert_matches_type(ChunkUpdateContentResponse, chunk, path=["response"])
+
+ @parametrize
+ def test_raw_response_update_content(self, client: ContextualAI) -> None:
+ response = client.datastores.chunks.with_raw_response.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ chunk = response.parse()
+ assert_matches_type(ChunkUpdateContentResponse, chunk, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update_content(self, client: ContextualAI) -> None:
+ with client.datastores.chunks.with_streaming_response.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ chunk = response.parse()
+ assert_matches_type(ChunkUpdateContentResponse, chunk, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update_content(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"):
+ client.datastores.chunks.with_raw_response.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="",
+ content="content",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `content_id` but received ''"):
+ client.datastores.chunks.with_raw_response.update_content(
+ content_id="",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ )
+
+
+class TestAsyncChunks:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_update_content(self, async_client: AsyncContextualAI) -> None:
+ chunk = await async_client.datastores.chunks.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ )
+ assert_matches_type(ChunkUpdateContentResponse, chunk, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update_content(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.datastores.chunks.with_raw_response.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ chunk = await response.parse()
+ assert_matches_type(ChunkUpdateContentResponse, chunk, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update_content(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.datastores.chunks.with_streaming_response.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ chunk = await response.parse()
+ assert_matches_type(ChunkUpdateContentResponse, chunk, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update_content(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"):
+ await async_client.datastores.chunks.with_raw_response.update_content(
+ content_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="",
+ content="content",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `content_id` but received ''"):
+ await async_client.datastores.chunks.with_raw_response.update_content(
+ content_id="",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ content="content",
+ )
diff --git a/tests/api_resources/datastores/test_documents.py b/tests/api_resources/datastores/test_documents.py
index 86caad7..fb9601e 100644
--- a/tests/api_resources/datastores/test_documents.py
+++ b/tests/api_resources/datastores/test_documents.py
@@ -14,6 +14,7 @@
from contextual.types.datastores import (
DocumentMetadata,
IngestionResponse,
+ DocumentDeleteResponse,
DocumentGetParseResultResponse,
)
@@ -80,7 +81,7 @@ def test_method_delete(self, client: ContextualAI) -> None:
document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, document, path=["response"])
+ assert_matches_type(DocumentDeleteResponse, document, path=["response"])
@parametrize
def test_raw_response_delete(self, client: ContextualAI) -> None:
@@ -92,7 +93,7 @@ def test_raw_response_delete(self, client: ContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
document = response.parse()
- assert_matches_type(object, document, path=["response"])
+ assert_matches_type(DocumentDeleteResponse, document, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: ContextualAI) -> None:
@@ -104,7 +105,7 @@ def test_streaming_response_delete(self, client: ContextualAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
document = response.parse()
- assert_matches_type(object, document, path=["response"])
+ assert_matches_type(DocumentDeleteResponse, document, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -406,7 +407,7 @@ async def test_method_delete(self, async_client: AsyncContextualAI) -> None:
document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, document, path=["response"])
+ assert_matches_type(DocumentDeleteResponse, document, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None:
@@ -418,7 +419,7 @@ async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> Non
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
document = await response.parse()
- assert_matches_type(object, document, path=["response"])
+ assert_matches_type(DocumentDeleteResponse, document, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None:
@@ -430,7 +431,7 @@ async def test_streaming_response_delete(self, async_client: AsyncContextualAI)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
document = await response.parse()
- assert_matches_type(object, document, path=["response"])
+ assert_matches_type(DocumentDeleteResponse, document, path=["response"])
assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index f892cce..1aff96a 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -12,6 +12,8 @@
from contextual.types import (
Agent,
CreateAgentOutput,
+ AgentDeleteResponse,
+ AgentUpdateResponse,
AgentMetadataResponse,
)
from contextual.pagination import SyncPage, AsyncPage
@@ -136,7 +138,7 @@ def test_method_update(self, client: ContextualAI) -> None:
agent = client.agents.update(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
@parametrize
def test_method_update_with_all_params(self, client: ContextualAI) -> None:
@@ -214,7 +216,7 @@ def test_method_update_with_all_params(self, client: ContextualAI) -> None:
suggested_queries=["string"],
system_prompt="system_prompt",
)
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
@parametrize
def test_raw_response_update(self, client: ContextualAI) -> None:
@@ -225,7 +227,7 @@ def test_raw_response_update(self, client: ContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
@parametrize
def test_streaming_response_update(self, client: ContextualAI) -> None:
@@ -236,7 +238,7 @@ def test_streaming_response_update(self, client: ContextualAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -285,7 +287,7 @@ def test_method_delete(self, client: ContextualAI) -> None:
agent = client.agents.delete(
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
@parametrize
def test_raw_response_delete(self, client: ContextualAI) -> None:
@@ -296,7 +298,7 @@ def test_raw_response_delete(self, client: ContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: ContextualAI) -> None:
@@ -307,7 +309,7 @@ def test_streaming_response_delete(self, client: ContextualAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -432,6 +434,48 @@ def test_path_params_reset(self, client: ContextualAI) -> None:
"",
)
+ @parametrize
+ def test_method_save_template(self, client: ContextualAI) -> None:
+ agent = client.agents.save_template(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ name="name",
+ )
+ assert_matches_type(object, agent, path=["response"])
+
+ @parametrize
+ def test_raw_response_save_template(self, client: ContextualAI) -> None:
+ response = client.agents.with_raw_response.save_template(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(object, agent, path=["response"])
+
+ @parametrize
+ def test_streaming_response_save_template(self, client: ContextualAI) -> None:
+ with client.agents.with_streaming_response.save_template(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(object, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_save_template(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.with_raw_response.save_template(
+ agent_id="",
+ name="name",
+ )
+
class TestAsyncAgents:
parametrize = pytest.mark.parametrize(
@@ -552,7 +596,7 @@ async def test_method_update(self, async_client: AsyncContextualAI) -> None:
agent = await async_client.agents.update(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncContextualAI) -> None:
@@ -630,7 +674,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncContextual
suggested_queries=["string"],
system_prompt="system_prompt",
)
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncContextualAI) -> None:
@@ -641,7 +685,7 @@ async def test_raw_response_update(self, async_client: AsyncContextualAI) -> Non
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncContextualAI) -> None:
@@ -652,7 +696,7 @@ async def test_streaming_response_update(self, async_client: AsyncContextualAI)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -701,7 +745,7 @@ async def test_method_delete(self, async_client: AsyncContextualAI) -> None:
agent = await async_client.agents.delete(
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None:
@@ -712,7 +756,7 @@ async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> Non
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None:
@@ -723,7 +767,7 @@ async def test_streaming_response_delete(self, async_client: AsyncContextualAI)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(object, agent, path=["response"])
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -847,3 +891,45 @@ async def test_path_params_reset(self, async_client: AsyncContextualAI) -> None:
await async_client.agents.with_raw_response.reset(
"",
)
+
+ @parametrize
+ async def test_method_save_template(self, async_client: AsyncContextualAI) -> None:
+ agent = await async_client.agents.save_template(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ name="name",
+ )
+ assert_matches_type(object, agent, path=["response"])
+
+ @parametrize
+ async def test_raw_response_save_template(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.with_raw_response.save_template(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(object, agent, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_save_template(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.with_streaming_response.save_template(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(object, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_save_template(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.with_raw_response.save_template(
+ agent_id="",
+ name="name",
+ )
diff --git a/tests/api_resources/test_datastores.py b/tests/api_resources/test_datastores.py
index 053c3ed..d69b176 100644
--- a/tests/api_resources/test_datastores.py
+++ b/tests/api_resources/test_datastores.py
@@ -12,7 +12,9 @@
from contextual.types import (
Datastore,
DatastoreMetadata,
+ DatastoreResetResponse,
CreateDatastoreResponse,
+ DatastoreDeleteResponse,
DatastoreUpdateResponse,
)
from contextual.pagination import SyncDatastoresPage, AsyncDatastoresPage
@@ -176,7 +178,7 @@ def test_method_delete(self, client: ContextualAI) -> None:
datastore = client.datastores.delete(
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreDeleteResponse, datastore, path=["response"])
@parametrize
def test_raw_response_delete(self, client: ContextualAI) -> None:
@@ -187,7 +189,7 @@ def test_raw_response_delete(self, client: ContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreDeleteResponse, datastore, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: ContextualAI) -> None:
@@ -198,7 +200,7 @@ def test_streaming_response_delete(self, client: ContextualAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreDeleteResponse, datastore, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -252,7 +254,7 @@ def test_method_reset(self, client: ContextualAI) -> None:
datastore = client.datastores.reset(
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreResetResponse, datastore, path=["response"])
@parametrize
def test_raw_response_reset(self, client: ContextualAI) -> None:
@@ -263,7 +265,7 @@ def test_raw_response_reset(self, client: ContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreResetResponse, datastore, path=["response"])
@parametrize
def test_streaming_response_reset(self, client: ContextualAI) -> None:
@@ -274,7 +276,7 @@ def test_streaming_response_reset(self, client: ContextualAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreResetResponse, datastore, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -444,7 +446,7 @@ async def test_method_delete(self, async_client: AsyncContextualAI) -> None:
datastore = await async_client.datastores.delete(
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreDeleteResponse, datastore, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None:
@@ -455,7 +457,7 @@ async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> Non
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = await response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreDeleteResponse, datastore, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None:
@@ -466,7 +468,7 @@ async def test_streaming_response_delete(self, async_client: AsyncContextualAI)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = await response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreDeleteResponse, datastore, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -520,7 +522,7 @@ async def test_method_reset(self, async_client: AsyncContextualAI) -> None:
datastore = await async_client.datastores.reset(
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreResetResponse, datastore, path=["response"])
@parametrize
async def test_raw_response_reset(self, async_client: AsyncContextualAI) -> None:
@@ -531,7 +533,7 @@ async def test_raw_response_reset(self, async_client: AsyncContextualAI) -> None
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = await response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreResetResponse, datastore, path=["response"])
@parametrize
async def test_streaming_response_reset(self, async_client: AsyncContextualAI) -> None:
@@ -542,7 +544,7 @@ async def test_streaming_response_reset(self, async_client: AsyncContextualAI) -
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
datastore = await response.parse()
- assert_matches_type(object, datastore, path=["response"])
+ assert_matches_type(DatastoreResetResponse, datastore, path=["response"])
assert cast(Any, response.is_closed) is True