Skip to content

Commit da7af93

Browse files
committed
Release v0.2.8: Add support for model reasoning/thinking content
- Added reasoning support for GLM-4 (`reasoning_content` field)
- Added reasoning support for OpenRouter models (`reasoning` field)
- Added a "Reasoning" panel in the REPL to display the thinking process
- Updated logging to capture reasoning data
- Updated the parser to handle multi-format reasoning fields
- Bumped version to 0.2.8
1 parent 5657cca commit da7af93

8 files changed

Lines changed: 110 additions & 10 deletions

File tree

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# 🤖 SuperCoder
22

3-
[![Version](https://img.shields.io/badge/version-0.2.7-blue.svg)](https://github.com/Mage212/supercoder)
3+
[![Version](https://img.shields.io/badge/version-0.2.8-blue.svg)](https://github.com/Mage212/supercoder)
44
[![Python](https://img.shields.io/badge/python-3.11+-green.svg)](https://python.org)
55
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
66

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "supercoder"
3-
version = "0.2.6"
3+
version = "0.2.8"
44
description = "AI Coding Assistant for the Terminal"
55
readme = "README.md"
66
requires-python = ">=3.11"

supercoder/agent/coder_agent.py

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ def chat_stream(self, user_message: str):
189189

190190
# Stream response
191191
response_text = ""
192+
reasoning_text = ""
192193

193194
try:
194195
for chunk in self.llm.chat_stream(messages):
@@ -197,9 +198,15 @@ def chat_stream(self, user_message: str):
197198
raise AgentAbortedError("Agent execution aborted by user")
198199

199200
if not chunk.is_done:
201+
# Yield reasoning if present
202+
if chunk.reasoning:
203+
yield {"type": "reasoning", "content": chunk.reasoning}
204+
reasoning_text += chunk.reasoning
205+
200206
# Yield token for real-time display
201-
yield {"type": "token", "content": chunk.content}
202-
response_text += chunk.content
207+
if chunk.content:
208+
yield {"type": "token", "content": chunk.content}
209+
response_text += chunk.content
203210

204211
# Signal end of text generation
205212
yield {"type": "done", "content": ""}
@@ -226,7 +233,7 @@ def chat_stream(self, user_message: str):
226233
if response_text:
227234
self.context.add_message(Message("assistant", response_text))
228235
# Log model response
229-
get_logger().log_model_response(response_text, self.llm.model)
236+
get_logger().log_model_response(response_text, self.llm.model, reasoning=reasoning_text)
230237
# Auto-save session
231238
self._save_current_session()
232239

supercoder/llm/base.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ class Message:
1616
class StreamChunk:
1717
"""Streaming response chunk."""
1818
content: str
19+
reasoning: str = "" # reasoning_content from models like GLM
1920
is_done: bool = False
2021

2122

supercoder/llm/openai_client.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,8 +77,14 @@ def chat_stream(self, messages: list[Message]) -> Iterator[StreamChunk]:
7777
)
7878

7979
for chunk in stream:
80-
if chunk.choices and chunk.choices[0].delta.content:
81-
yield StreamChunk(content=chunk.choices[0].delta.content)
80+
if chunk.choices:
81+
delta = chunk.choices[0].delta
82+
content = delta.content or ""
83+
# Check for reasoning_content (GLM, DeepSeek, etc.) or just reasoning (OpenRouter)
84+
reasoning = getattr(delta, "reasoning_content", "") or getattr(delta, "reasoning", "") or ""
85+
86+
if content or reasoning:
87+
yield StreamChunk(content=content, reasoning=reasoning)
8288

8389
yield StreamChunk(content="", is_done=True)
8490

supercoder/logging.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,16 +52,22 @@ def log_user_input(self, message: str) -> None:
5252
"timestamp": datetime.now().isoformat(),
5353
})
5454

55-
def log_model_response(self, response: str, model: Optional[str] = None) -> None:
55+
def log_model_response(self, response: str, model: Optional[str] = None, reasoning: Optional[str] = None) -> None:
    """Log an assistant response, optionally with the model's reasoning text.

    Args:
        response: The assistant's final text content.
        model: Model identifier; falls back to ``self.model_name`` when falsy.
        reasoning: Optional thinking/reasoning text; only logged when non-empty.
    """
    # Logging is opt-in; bail out early when disabled.
    if not self.enabled:
        return

    entry = dict(
        type="assistant",
        model=model or self.model_name,
        content=response,
        timestamp=datetime.now().isoformat(),
    )

    # Keep the log entry compact: omit the reasoning key entirely when empty.
    if reasoning:
        entry["reasoning"] = reasoning

    self._write_entry(entry)
6571

6672
def log_system_prompt(self, prompt: str) -> None:
6773
"""Log the current system prompt."""

supercoder/repl.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,7 @@ def _handle_chat(self, message):
158158

159159
# Collect full response first, then display
160160
response_text = ""
161+
reasoning_text = ""
161162
tool_calls = []
162163
tool_results = []
163164
errors = []
@@ -181,6 +182,8 @@ def _handle_chat(self, message):
181182

182183
if event_type == "token":
183184
response_text += content
185+
elif event_type == "reasoning":
186+
reasoning_text += content
184187
elif event_type == "tool_call":
185188
tool_calls.append(content)
186189
# Track files from tool args
@@ -238,6 +241,15 @@ def _handle_chat(self, message):
238241
for error in errors:
239242
self.console.print(Panel(f"[red]{error}[/]", title="[bold red]Error[/]", border_style="red"))
240243

244+
# 2.5 Display Reasoning (New)
245+
if reasoning_text:
246+
self.console.print(Panel(
247+
Markdown(reasoning_text),
248+
title="[bold magenta]💭 Reasoning[/]",
249+
border_style="magenta",
250+
box=self._get_box_style()
251+
))
252+
241253
# 3. Display Assistant Response
242254
clean_text = self._filter_special_tokens(response_text)
243255
if clean_text:

tests/test_reasoning_extraction.py

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
2+
import pytest
3+
from unittest.mock import MagicMock, Mock
4+
from supercoder.llm.openai_client import OpenAIClient
5+
from supercoder.llm.base import Message, StreamChunk
6+
from supercoder.config import Config
7+
8+
class MockDelta:
    """Stand-in for an OpenAI streaming delta object.

    The ``reasoning_content`` attribute is deliberately left unset when the
    argument is ``None``, so callers exercising ``getattr(..., default)``
    fallbacks see a genuinely missing attribute rather than ``None``.
    """

    def __init__(self, content=None, reasoning_content=None):
        self.content = content
        if reasoning_content is None:
            return
        self.reasoning_content = reasoning_content
13+
14+
class MockChoice:
    """Stand-in for one element of ``chunk.choices``, wrapping a delta."""

    def __init__(self, delta):
        self.delta = delta
17+
18+
class MockChunk:
    """Stand-in for a streaming response chunk with an optional choices list."""

    def __init__(self, choices=None):
        self.choices = choices
21+
22+
def test_chat_stream_extracts_reasoning():
    """Verify chat_stream surfaces reasoning_content alongside regular content."""

    # Minimal config carrying a fake API key.
    config = Config()
    config.api_key = "test_key"

    # Client with the underlying OpenAI SDK object mocked out.
    client = OpenAIClient(config)
    client.client = MagicMock()

    # Simulated stream: reasoning-only, then content-only, then both together.
    fake_stream = [
        MockChunk(choices=[MockChoice(delta=MockDelta(content=None, reasoning_content="Thinking..."))]),
        MockChunk(choices=[MockChoice(delta=MockDelta(content="Hello", reasoning_content=None))]),
        MockChunk(choices=[MockChoice(delta=MockDelta(content=" World", reasoning_content=" (Done)"))]),
    ]
    client.client.chat.completions.create.return_value = fake_stream

    produced = list(client.chat_stream([Message("user", "test")]))

    # Three data chunks plus the terminating is_done chunk.
    assert len(produced) == 4

    reasoning_only, content_only, both, done = produced

    assert reasoning_only.content == ""
    assert reasoning_only.reasoning == "Thinking..."

    assert content_only.content == "Hello"
    assert content_only.reasoning == ""

    assert both.content == " World"
    assert both.reasoning == " (Done)"

    assert done.is_done is True
65+
66+
if __name__ == "__main__":
    # Allow running this file directly, without pytest.
    test_chat_stream_extracts_reasoning()
    print("Test passed!")

0 commit comments

Comments (0)