Skip to content

Commit d8fc198

Browse files
Vadim MitroshkinVadim Mitroshkin
authored and committed
v0.2.2: UI improvements, interactive command handling, tool calling configuration
- Enhanced chat interface with styled user/assistant messages
- EOF-based input handling for interactive commands to prevent hangs
- Custom HTTP headers with tool name and repository link
- New tool_calling_type config parameter for model-specific tool calling
- Updated README with version badges and What's New section
1 parent 45e8b03 commit d8fc198

15 files changed

Lines changed: 1004 additions & 129 deletions

README.md

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,22 @@
11
# 🤖 SuperCoder
22

3+
[![Version](https://img.shields.io/badge/version-0.2.2-blue.svg)](https://github.com/Mage212/supercoder)
4+
[![Python](https://img.shields.io/badge/python-3.11+-green.svg)](https://python.org)
5+
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
6+
37
**AI Coding Assistant for the Terminal** — A powerful, extensible, and terminal-native coding agent designed to help you build, search, and fix code with natural language.
48

59
---
610

11+
## 🆕 What's New in v0.2.2
12+
13+
- **Improved UI**: Enhanced chat interface with styled user/assistant messages and better visual feedback
14+
- **Interactive Command Execution**: EOF-based input handling to prevent command hangs
15+
- **Custom Headers**: Tool name and repository link now passed in HTTP headers for better API compatibility
16+
- **Tool Calling Configuration**: New `tool_calling_type` parameter in config to specify model-specific tool calling instructions (supports `supercoder`, `qwen_like`, `json_block`, `xml_function`)
17+
18+
---
19+
720
## ✨ Core Features
821

922
### 🔍 Code Search

pyproject.toml

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "supercoder"
3-
version = "0.2.0"
3+
version = "0.2.2"
44
description = "AI Coding Assistant for the Terminal"
55
readme = "README.md"
66
requires-python = ">=3.11"
@@ -9,7 +9,6 @@ authors = [
99
{name = "SuperCoder Team"}
1010
]
1111
keywords = ["ai", "coding", "assistant", "llm", "cli"]
12-
1312
dependencies = [
1413
"openai>=2.13.0",
1514
"click>=8.3.1",
@@ -22,6 +21,11 @@ dependencies = [
2221
"tiktoken>=0.12.0",
2322
]
2423

24+
[project.urls]
25+
Homepage = "https://github.com/Mage212/supercoder"
26+
Repository = "https://github.com/Mage212/supercoder"
27+
Issues = "https://github.com/Mage212/supercoder/issues"
28+
2529
[project.optional-dependencies]
2630
dev = [
2731
"pytest>=7.0.0",

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ prompt-toolkit>=3.0.52
99
tiktoken>=0.12.0
1010
pyyaml>=6.0.0
1111
pygments>=2.17.0
12+
questionary>=2.0.0
1213

1314
# RepoMap
1415
tree-sitter-languages>=1.10.2

supercoder/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
"""SuperCoder - AI Coding Assistant for the Terminal."""
22

3-
__version__ = "0.2.0"
3+
__version__ = "0.2.2"

supercoder/agent/coder_agent.py

Lines changed: 70 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,8 @@ def __init__(
2828
tools: list[BaseTool] | None = None,
2929
context_config: ContextConfig | None = None,
3030
use_repo_map: bool = False,
31-
repo_root: str = "."
31+
repo_root: str = ".",
32+
tool_calling_type: str = "supercoder"
3233
):
3334
self.llm = llm
3435
self.tools = {t.definition.name: t for t in (tools or [])}
@@ -43,8 +44,17 @@ def __init__(
4344
self.rules_loader.ensure_rules_dir() # Create .supercoder/rules/ if missing
4445
project_rules = self.rules_loader.get_rules_for_prompt()
4546

47+
# Store tool calling type for prompt generation
48+
self.tool_calling_type = tool_calling_type
49+
self._tools_list = tools or [] # Keep reference for prompt rebuilding
50+
self._project_rules = project_rules
51+
4652
# Build system prompt template with tools and project rules
47-
self.base_system_prompt = build_system_prompt(tools or [], rules=project_rules)
53+
self.base_system_prompt = build_system_prompt(
54+
self._tools_list,
55+
rules=project_rules,
56+
tool_calling_type=self.tool_calling_type
57+
)
4858

4959
# Setup context management
5060
config = context_config or ContextConfig()
@@ -77,6 +87,20 @@ def _update_system_prompt(self):
7787
# Log the updated system prompt
7888
get_logger().log_system_prompt(prompt)
7989

90+
def set_tool_calling_type(self, tool_calling_type: str) -> None:
91+
"""Update tool calling type and rebuild system prompt.
92+
93+
Call this when switching to a model with a different tool_calling_type.
94+
"""
95+
if tool_calling_type != self.tool_calling_type:
96+
self.tool_calling_type = tool_calling_type
97+
self.base_system_prompt = build_system_prompt(
98+
self._tools_list,
99+
rules=self._project_rules,
100+
tool_calling_type=self.tool_calling_type
101+
)
102+
self._update_system_prompt()
103+
80104
def chat_stream(self, user_message: str):
81105
"""Process user message and yield response chunks.
82106
@@ -142,7 +166,49 @@ def chat_stream(self, user_message: str):
142166
continue
143167

144168
try:
145-
result = self.tools[name].execute(args)
169+
tool = self.tools[name]
170+
171+
# Use streaming for command-exec to handle interactive commands
172+
if name == "command-exec" and hasattr(tool, 'execute_streaming'):
173+
result = ""
174+
process_to_kill = None
175+
176+
for event in tool.execute_streaming(args):
177+
if event["type"] == "waiting_input":
178+
# Yield to REPL for user decision - REPL will kill the process if needed
179+
process_to_kill = event.get("process")
180+
yield {
181+
"type": "command_waiting",
182+
"content": event["content"],
183+
"process": process_to_kill,
184+
"tool_name": name
185+
}
186+
# After REPL handles it, the process is either killed or we continue
187+
# Check if process was killed
188+
if process_to_kill and process_to_kill.poll() is not None:
189+
# Process was killed by REPL - tell model NOT to retry
190+
partial_output = ''.join(event.get('stdout', []))
191+
result = (
192+
f"⚠️ INTERACTIVE PROCESS KILLED BY USER\n"
193+
f"The command requires user input which cannot be provided in this environment.\n"
194+
f"DO NOT attempt to run this command again.\n\n"
195+
f"Partial output before kill:\n{partial_output}"
196+
)
197+
break
198+
# Otherwise continue waiting until timeout
199+
elif event["type"] == "output":
200+
# Incremental output - could stream to console
201+
pass
202+
elif event["type"] == "done":
203+
result = event["content"]
204+
elif event["type"] == "stalled":
205+
result = f"{event['content']}\n\n(waiting for command to complete...)"
206+
elif event["type"] == "error":
207+
result = event["content"]
208+
else:
209+
result = tool.execute(args)
210+
211+
146212
yield {"type": "tool_result", "content": {"name": name, "result": result}}
147213
# Log tool call and result
148214
get_logger().log_tool_call(name, args)
@@ -162,6 +228,7 @@ def chat_stream(self, user_message: str):
162228
# Let agent continue with tool results (recursive call handling)
163229
yield from self.chat_stream("")
164230

231+
165232
def _extract_tool_call(self, text: str) -> dict | None:
166233
"""Extract tool call from response text using multi-format parser."""
167234
result = self.tool_parser.parse(text)

supercoder/agent/prompts.py

Lines changed: 18 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
11
"""System prompts for the agent."""
22

3+
from .tool_calling_prompts import get_tool_calling_prompt
4+
35
# Compact prompt optimized for local/smaller models
6+
# Tool calling instructions are injected dynamically based on model's tool_calling_type
47
SYSTEM_PROMPT = """You are a coding assistant.
58
6-
# Tool Calling
7-
Call tools with <@TOOL>{{"name": "<tool-name>", "arguments": "<json-args>"}}</@TOOL>
9+
{tool_calling_instructions}
810
911
Available tools:
1012
{tools}
@@ -45,12 +47,18 @@
4547
"""
4648

4749

48-
def build_system_prompt(tools: list, rules: str = "") -> str:
50+
def build_system_prompt(
51+
tools: list,
52+
rules: str = "",
53+
tool_calling_type: str = "supercoder"
54+
) -> str:
4955
"""Build system prompt with available tools and project rules.
5056
5157
Args:
5258
tools: List of available tools.
5359
rules: Optional project-specific rules to include.
60+
tool_calling_type: Type of tool calling format to use in instructions.
61+
Valid: supercoder, qwen_like, json_block, xml_function
5462
"""
5563
if not tools:
5664
tool_list = "(no tools available yet)"
@@ -60,11 +68,16 @@ def build_system_prompt(tools: list, rules: str = "") -> str:
6068
for t in tools
6169
)
6270

63-
prompt = SYSTEM_PROMPT.format(tools=tool_list)
71+
# Get tool calling instructions for this model type
72+
tool_calling_instructions = get_tool_calling_prompt(tool_calling_type)
73+
74+
prompt = SYSTEM_PROMPT.format(
75+
tools=tool_list,
76+
tool_calling_instructions=tool_calling_instructions
77+
)
6478

6579
# Add project rules if provided
6680
if rules:
6781
prompt += f"\n{rules}"
6882

6983
return prompt
70-
Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
"""Tool calling prompt templates for different model types.
2+
3+
Each model may expect tools to be called in a specific format.
4+
This module provides format-specific instructions that are injected
5+
into the system prompt based on the model's `tool_calling_type`.
6+
"""
7+
8+
# Tool calling instruction templates for each supported type
9+
TOOL_CALLING_PROMPTS = {
10+
# Our native format - most explicit and clear
11+
"supercoder": """# Tool Calling
12+
Call tools with <@TOOL>{"name": "<tool-name>", "arguments": <json-args>}</@TOOL>
13+
14+
**Example:**
15+
<@TOOL>{"name": "file-read", "arguments": {"path": "main.py"}}</@TOOL>
16+
17+
**Multiple tool calls:**
18+
You can call multiple tools in one response:
19+
<@TOOL>{"name": "file-read", "arguments": {"path": "file1.py"}}</@TOOL>
20+
<@TOOL>{"name": "file-read", "arguments": {"path": "file2.py"}}</@TOOL>
21+
""",
22+
23+
# Qwen-style format used by gpt-oss, deepresearch, and similar models
24+
"qwen_like": """# Tool Calling
25+
Call tools using this format:
26+
to=tool:<tool-name> <json-arguments>
27+
28+
**Example:**
29+
to=tool:file-read {"path": "main.py"}
30+
31+
**Available argument formats:**
32+
to=tool:code-edit {"file": "app.py", "operation": "create", "content": "print('hello')"}
33+
to=tool:command-exec {"command": "ls -la", "timeout": 30}
34+
to=tool:code-search {"query": "def main", "path": "."}
35+
to=tool:project-structure {"path": "."}
36+
37+
**Important:** Always use valid JSON for arguments. Use double quotes for strings.
38+
""",
39+
40+
# JSON code block format - common with many instruction-tuned models
41+
"json_block": """# Tool Calling
42+
Call tools using JSON code blocks:
43+
44+
```json
45+
{"tool": "<tool-name>", "arguments": {"arg1": "value1", "arg2": "value2"}}
46+
```
47+
48+
**Example:**
49+
```json
50+
{"tool": "file-read", "arguments": {"path": "main.py"}}
51+
```
52+
53+
**For code-edit:**
54+
```json
55+
{"tool": "code-edit", "arguments": {"file": "app.py", "operation": "create", "content": "print('hello')"}}
56+
```
57+
58+
**Important:** Use proper JSON formatting with double quotes.
59+
""",
60+
61+
# XML function call format
62+
"xml_function": """# Tool Calling
63+
Call tools using XML syntax:
64+
65+
<function_call name="<tool-name>">
66+
{"arg1": "value1", "arg2": "value2"}
67+
</function_call>
68+
69+
**Example:**
70+
<function_call name="file-read">
71+
{"path": "main.py"}
72+
</function_call>
73+
74+
**For code-edit:**
75+
<function_call name="code-edit">
76+
{"file": "app.py", "operation": "create", "content": "print('hello')"}
77+
</function_call>
78+
""",
79+
80+
# OpenAI-compatible function calling (for reference, though most use native)
81+
"openai_native": """# Tool Calling
82+
You have access to tools. When you need to use a tool, respond with a function call.
83+
The system will execute the tool and provide results.
84+
85+
Call tools by specifying the tool name and arguments in JSON format.
86+
""",
87+
}
88+
89+
# Valid tool calling types for validation
90+
VALID_TOOL_CALLING_TYPES = set(TOOL_CALLING_PROMPTS.keys())
91+
92+
93+
def get_tool_calling_prompt(tool_calling_type: str) -> str:
94+
"""Get tool calling instructions for the specified type.
95+
96+
Args:
97+
tool_calling_type: The type of tool calling format to use.
98+
Valid values: supercoder, qwen_like, json_block, xml_function
99+
100+
Returns:
101+
String containing tool calling instructions for the system prompt.
102+
Falls back to 'supercoder' format if type is unknown.
103+
"""
104+
return TOOL_CALLING_PROMPTS.get(tool_calling_type, TOOL_CALLING_PROMPTS["supercoder"])
105+
106+
107+
def get_available_types() -> list[str]:
108+
"""Get list of available tool calling types."""
109+
return list(TOOL_CALLING_PROMPTS.keys())

0 commit comments

Comments (0)