diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 36b2aff..f996350 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.1.0-alpha.34"
+  ".": "0.1.0-alpha.35"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index c021c17..7a6c349 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
 configured_endpoints: 70
-openapi_spec_hash: 9018ebfb2a9e1afa87058b3a4bd41b0b
+openapi_spec_hash: 11279400677011ad5dc1ebba33216ae4
 config_hash: aad16f20fed13ac50211fc1d0e2ea621
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7782cb1..b3014e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
 # Changelog
 
+## 0.1.0-alpha.35 (2025-11-26)
+
+Full Changelog: [v0.1.0-alpha.34...v0.1.0-alpha.35](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.34...v0.1.0-alpha.35)
+
+### Features
+
+* **api:** api update ([94bacaf](https://github.com/cleanlab/codex-python/commit/94bacaf492809bc9bc15175d272de53ad2569895))
+* **api:** api update ([884de94](https://github.com/cleanlab/codex-python/commit/884de944e616b26580830817486bb85e74f1e7c4))
+
+
+### Chores
+
+* add Python 3.14 classifier and testing ([4dec29c](https://github.com/cleanlab/codex-python/commit/4dec29cdf74dd3beeccf326678db7170156f0c44))
+
 ## 0.1.0-alpha.34 (2025-11-19)
 
 Full Changelog: [v0.1.0-alpha.33...v0.1.0-alpha.34](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.33...v0.1.0-alpha.34)
diff --git a/pyproject.toml b/pyproject.toml
index 5fb2418..8f23045 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "codex-sdk"
-version = "0.1.0-alpha.34"
+version = "0.1.0-alpha.35"
 description = "Internal SDK used within cleanlab-codex package. Refer to https://pypi.org/project/cleanlab-codex/ instead."
 dynamic = ["readme"]
 license = "MIT"
@@ -24,6 +24,7 @@ classifiers = [
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3.13",
+    "Programming Language :: Python :: 3.14",
     "Operating System :: OS Independent",
     "Operating System :: POSIX",
     "Operating System :: MacOS",
diff --git a/src/codex/_version.py b/src/codex/_version.py
index c2ea81e..0bddca9 100644
--- a/src/codex/_version.py
+++ b/src/codex/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "codex"
-__version__ = "0.1.0-alpha.34"  # x-release-please-version
+__version__ = "0.1.0-alpha.35"  # x-release-please-version
diff --git a/src/codex/types/projects/query_log_list_by_group_response.py b/src/codex/types/projects/query_log_list_by_group_response.py
index 1a89baa..34b722b 100644
--- a/src/codex/types/projects/query_log_list_by_group_response.py
+++ b/src/codex/types/projects/query_log_list_by_group_response.py
@@ -467,12 +467,6 @@ class QueryLogsByGroupQueryLog(BaseModel):
     expert review. Expert review will override the original guardrail decision.
     """
 
-    expert_override_log_id: Optional[str] = None
-    """
-    ID of the query log with expert review that overrode the original guardrail
-    decision.
-    """
-
     expert_review_created_at: Optional[datetime] = None
     """When the expert review was created"""
 
@@ -529,6 +523,15 @@ class QueryLogsByGroupQueryLog(BaseModel):
     primary_eval_issue_score: Optional[float] = None
     """Score of the primary eval issue"""
 
+    system_prompt: Optional[str] = None
+    """
+    Content of the first system message associated with this query log, if
+    available.
+    """
+
+    system_prompt_hash: Optional[str] = None
+    """SHA-256 hash of the system prompt content for quick equality checks."""
+
     tools: Optional[List[QueryLogsByGroupQueryLogTool]] = None
     """Tools to use for the LLM call.
 
diff --git a/src/codex/types/projects/query_log_list_groups_response.py b/src/codex/types/projects/query_log_list_groups_response.py
index fe70223..1587af7 100644
--- a/src/codex/types/projects/query_log_list_groups_response.py
+++ b/src/codex/types/projects/query_log_list_groups_response.py
@@ -462,12 +462,6 @@ class QueryLogListGroupsResponse(BaseModel):
     expert review. Expert review will override the original guardrail decision.
     """
 
-    expert_override_log_id: Optional[str] = None
-    """
-    ID of the query log with expert review that overrode the original guardrail
-    decision.
-    """
-
     expert_review_created_at: Optional[datetime] = None
     """When the expert review was created"""
 
@@ -524,6 +518,15 @@ class QueryLogListGroupsResponse(BaseModel):
     primary_eval_issue_score: Optional[float] = None
     """Score of the primary eval issue"""
 
+    system_prompt: Optional[str] = None
+    """
+    Content of the first system message associated with this query log, if
+    available.
+    """
+
+    system_prompt_hash: Optional[str] = None
+    """SHA-256 hash of the system prompt content for quick equality checks."""
+
     tools: Optional[List[Tool]] = None
     """Tools to use for the LLM call.
 
diff --git a/src/codex/types/projects/query_log_list_response.py b/src/codex/types/projects/query_log_list_response.py
index dc7768f..e71f05b 100644
--- a/src/codex/types/projects/query_log_list_response.py
+++ b/src/codex/types/projects/query_log_list_response.py
@@ -450,12 +450,6 @@ class QueryLogListResponse(BaseModel):
     expert review. Expert review will override the original guardrail decision.
     """
 
-    expert_override_log_id: Optional[str] = None
-    """
-    ID of the query log with expert review that overrode the original guardrail
-    decision.
-    """
-
     expert_review_created_at: Optional[datetime] = None
     """When the expert review was created"""
 
@@ -509,6 +503,15 @@ class QueryLogListResponse(BaseModel):
     primary_eval_issue_score: Optional[float] = None
     """Score of the primary eval issue"""
 
+    system_prompt: Optional[str] = None
+    """
+    Content of the first system message associated with this query log, if
+    available.
+    """
+
+    system_prompt_hash: Optional[str] = None
+    """SHA-256 hash of the system prompt content for quick equality checks."""
+
     tools: Optional[List[Tool]] = None
     """Tools to use for the LLM call.
 
diff --git a/src/codex/types/projects/query_log_retrieve_response.py b/src/codex/types/projects/query_log_retrieve_response.py
index db91943..6116840 100644
--- a/src/codex/types/projects/query_log_retrieve_response.py
+++ b/src/codex/types/projects/query_log_retrieve_response.py
@@ -367,6 +367,8 @@ class QueryLogRetrieveResponse(BaseModel):
 
     expert_answer_id: Optional[str] = None
 
+    expert_override_log_id: Optional[str] = None
+
     formatted_escalation_eval_scores: Optional[Dict[str, FormattedEscalationEvalScores]] = None
 
     formatted_eval_scores: Optional[Dict[str, FormattedEvalScores]] = None
@@ -392,6 +394,8 @@ class QueryLogRetrieveResponse(BaseModel):
     issue_status: Literal["addressed", "unaddressed"]
     """Manual review status override for remediations."""
 
+    log_needs_review: bool
+
     needs_review: bool
 
     project_id: str
@@ -457,12 +461,6 @@ class QueryLogRetrieveResponse(BaseModel):
     expert review. Expert review will override the original guardrail decision.
     """
 
-    expert_override_log_id: Optional[str] = None
-    """
-    ID of the query log with expert review that overrode the original guardrail
-    decision.
-    """
-
     expert_review_created_at: Optional[datetime] = None
     """When the expert review was created"""
 
@@ -519,6 +517,15 @@ class QueryLogRetrieveResponse(BaseModel):
     primary_eval_issue_score: Optional[float] = None
     """Score of the primary eval issue"""
 
+    system_prompt: Optional[str] = None
+    """
+    Content of the first system message associated with this query log, if
+    available.
+    """
+
+    system_prompt_hash: Optional[str] = None
+    """SHA-256 hash of the system prompt content for quick equality checks."""
+
     tools: Optional[List[Tool]] = None
     """Tools to use for the LLM call.
 
diff --git a/src/codex/types/projects/remediation_list_resolved_logs_response.py b/src/codex/types/projects/remediation_list_resolved_logs_response.py
index 9f1b77b..986f898 100644
--- a/src/codex/types/projects/remediation_list_resolved_logs_response.py
+++ b/src/codex/types/projects/remediation_list_resolved_logs_response.py
@@ -457,12 +457,6 @@ class QueryLog(BaseModel):
     expert review. Expert review will override the original guardrail decision.
     """
 
-    expert_override_log_id: Optional[str] = None
-    """
-    ID of the query log with expert review that overrode the original guardrail
-    decision.
-    """
-
     expert_review_created_at: Optional[datetime] = None
     """When the expert review was created"""
 
@@ -516,6 +510,15 @@ class QueryLog(BaseModel):
     primary_eval_issue_score: Optional[float] = None
     """Score of the primary eval issue"""
 
+    system_prompt: Optional[str] = None
+    """
+    Content of the first system message associated with this query log, if
+    available.
+    """
+
+    system_prompt_hash: Optional[str] = None
+    """SHA-256 hash of the system prompt content for quick equality checks."""
+
     tools: Optional[List[QueryLogTool]] = None
     """Tools to use for the LLM call.
 
diff --git a/src/codex/types/projects/remediations/expert_review_list_response.py b/src/codex/types/projects/remediations/expert_review_list_response.py
index 99d26ab..eadb974 100644
--- a/src/codex/types/projects/remediations/expert_review_list_response.py
+++ b/src/codex/types/projects/remediations/expert_review_list_response.py
@@ -16,6 +16,8 @@ class ExpertReviewListResponse(BaseModel):
 
     evaluated_response: Optional[str] = None
 
+    expert_override_log_id: str
+
     last_edited_at: datetime
 
     last_edited_by: Optional[str] = None
diff --git a/src/codex/types/projects/remediations/expert_review_retrieve_response.py b/src/codex/types/projects/remediations/expert_review_retrieve_response.py
index 9cb0da6..f48fac2 100644
--- a/src/codex/types/projects/remediations/expert_review_retrieve_response.py
+++ b/src/codex/types/projects/remediations/expert_review_retrieve_response.py
@@ -16,6 +16,8 @@ class ExpertReviewRetrieveResponse(BaseModel):
 
     evaluated_response: Optional[str] = None
 
+    expert_override_log_id: str
+
     last_edited_at: datetime
 
     last_edited_by: Optional[str] = None