This guide covers using AgentSpec programmatically through its Python API.
pip install agentspec

from agentspec import SpecGenerator, InstructionDatabase, TemplateManager, ContextDetector
# Initialize components
instruction_db = InstructionDatabase()
template_manager = TemplateManager()
context_detector = ContextDetector()
spec_generator = SpecGenerator(
instruction_db=instruction_db,
template_manager=template_manager,
context_detector=context_detector
)
# Generate a specification
from agentspec.core.spec_generator import SpecConfig
config = SpecConfig(
template_id="react_app",
output_format="markdown",
include_metadata=True
)
spec = spec_generator.generate_spec(config)
print(spec.content)

Main class for generating specifications.
from agentspec import SpecGenerator
from agentspec.core.spec_generator import SpecConfig
class SpecGenerator:
def __init__(
self,
instruction_db: InstructionDatabase,
template_manager: TemplateManager,
context_detector: Optional[ContextDetector] = None
):
"""Initialize SpecGenerator with required dependencies."""
def generate_spec(self, config: SpecConfig) -> GeneratedSpec:
"""Generate a specification based on configuration."""
def export_spec(self, spec: GeneratedSpec, output_path: str) -> None:
"""Export specification to file."""
def validate_spec(self, spec: GeneratedSpec) -> ValidationResult:
"""Validate a generated specification."""# Basic generation
config = SpecConfig(template_id="python-api")
spec = spec_generator.generate_spec(config)
# With custom tags
config = SpecConfig(
selected_tags=["frontend", "testing", "security"],
output_format="json"
)
spec = spec_generator.generate_spec(config)
# With project context
config = SpecConfig(
template_id="react_app",
project_context=context_detector.analyze_project("./my-project")
)
spec = spec_generator.generate_spec(config)

Manages loading and querying instructions.
from agentspec import InstructionDatabase
class InstructionDatabase:
def __init__(self, instructions_path: Optional[Path] = None):
"""Initialize with optional custom instructions path."""
def load_instructions(self) -> Dict[str, Instruction]:
"""Load all instructions from data files."""
def get_by_tags(self, tags: List[str]) -> List[Instruction]:
"""Get instructions matching any of the provided tags."""
def get_by_id(self, instruction_id: str) -> Optional[Instruction]:
"""Get instruction by ID."""
def get_all_tags(self) -> Set[str]:
"""Get all available tags."""
def search_instructions(self, query: str) -> List[Instruction]:
"""Search instructions by content."""# Load and query instructions
instruction_db = InstructionDatabase()
instructions = instruction_db.load_instructions()
# Get instructions by tags
testing_instructions = instruction_db.get_by_tags(["testing"])
security_instructions = instruction_db.get_by_tags(["security"])
- `analyze_project(project_path: str) -> ProjectContext`
- `suggest_instructions(context: ProjectContext) -> List[InstructionSuggestion]`
### 5. AIBestPracticesIntegrator
Integrates AI development best practices into existing projects.
```python
from agentspec import AIBestPracticesIntegrator
# Initialize
integrator = AIBestPracticesIntegrator()
# Analyze project for AI integration opportunities
analysis = integrator.analyze_project("./my-project")
# Generate integration recommendations
recommendations = integrator.get_integration_recommendations(analysis)

The TemplateManager class handles loading, validation, and recommendation of project templates.
from agentspec.core.template_manager import TemplateManager
class TemplateManager:
def __init__(self, templates_path: Optional[Path] = None,
schema_path: Optional[Path] = None)

Loads all templates from JSON files and resolves inheritance.
Returns:
Dict[str, Template]: Dictionary mapping template IDs to Template objects
Example:
manager = TemplateManager()
templates = manager.load_templates()
print(f"Available templates: {list(templates.keys())}")Retrieves a specific template by ID.
Parameters:
template_id: ID of the template to retrieve
Returns:
Optional[Template]: Template object if found, None otherwise
Example:
react_template = manager.get_template("react_app")
if react_template:
print(f"Template: {react_template.name}")Gets template recommendations based on project context.
Parameters:
project_context: Dictionary containing project information
Returns:
List[TemplateRecommendation]: Sorted list of recommendations
Example:
context = {
"project_type": "web_frontend",
"technology_stack": ["react", "typescript"],
"files": ["package.json", "tsconfig.json"]
}
recommendations = manager.get_recommended_templates(context)
for rec in recommendations:
print(f"{rec.template.name}: {rec.confidence_score:.2f}")Creates a new template and saves it to file.
Parameters:
template: Template object to create
Returns:
str: Template ID
Raises:
ValueError: If template is invalid or ID already exists
Example:
new_template = Template(
id="custom_template",
name="Custom Template",
description="My custom template",
version="1.0.0",
project_type="web_frontend",
technology_stack=["custom"],
default_tags=["custom"]
)
template_id = manager.create_template(new_template)

The ContextDetector class analyzes projects to detect technology stacks and suggest relevant instructions.
from agentspec.core.context_detector import ContextDetector
class ContextDetector:
def __init__(self)

Performs comprehensive project analysis.
Parameters:
project_path: Path to the project directory
Returns:
ProjectContext: Complete project analysis results
Raises:
ValueError: If project path is invalid
Example:
detector = ContextDetector()
context = detector.analyze_project("./my-project")
print(f"Project type: {context.project_type.value}")
print(f"Confidence: {context.confidence_score:.2f}")Detects technology stack from project files.
Parameters:
project_path: Path to the project directory
Returns:
TechnologyStack: Detected technologies
Example:
stack = detector.detect_technology_stack("./my-project")
print(f"Languages: {[lang.value for lang in stack.languages]}")
print(f"Frameworks: {[fw.name for fw in stack.frameworks]}")Suggests relevant instructions based on project context.
Parameters:
context: Project context information
Returns:
List[InstructionSuggestion]: Sorted list of suggestions
Example:
suggestions = detector.suggest_instructions(context)
for suggestion in suggestions[:5]: # Top 5
print(f"{suggestion.instruction_id}: {suggestion.confidence:.2f}")The SpecGenerator class generates specifications from instructions and templates.
from agentspec.core.spec_generator import SpecGenerator, SpecConfig
class SpecGenerator:
def __init__(self, instruction_db: Optional[InstructionDatabase] = None,
template_manager: Optional[TemplateManager] = None,
context_detector: Optional[ContextDetector] = None)

Generates a specification based on configuration.
Parameters:
config: SpecConfig with generation parameters
Returns:
GeneratedSpec: Generated specification with metadata
Raises:
ValueError: If configuration is invalid
Example:
generator = SpecGenerator()
config = SpecConfig(
selected_tags=["frontend", "testing"],
output_format="markdown"
)
spec = generator.generate_spec(config)
print(spec.content)

Applies a template to create specification configuration.
Parameters:
template: Template to apply
context: Optional project context for customization
Returns:
SpecConfig: Configuration based on template
Example:
template = template_manager.get_template("react_app")
config = generator.apply_template(template, project_context)
spec = generator.generate_spec(config)

Validates a generated specification.
Parameters:
spec: Generated specification to validate
Returns:
ValidationResult: Validation status and messages
Example:
result = generator.validate_spec(spec)
if not result.is_valid:
print(f"Validation errors: {result.errors}")
if result.warnings:
print(f"Warnings: {result.warnings}")Exports specification to file or returns as string.
Parameters:
spec: Generated specification to export
output_path: Optional file path to save
Returns:
str: Specification content
Example:
# Export to file
generator.export_spec(spec, "project_spec.md")
# Get as string
content = generator.export_spec(spec)

The main CLI entry point provides command-line interface functionality.
from agentspec.cli.main import AgentSpecCLI
cli = AgentSpecCLI()
exit_code = cli.run(['generate', '--tags', 'frontend,testing'])

Individual command handlers for specific CLI operations.
from agentspec.cli.commands import (
list_tags_command,
generate_spec_command,
analyze_project_command
)
# Use command handlers directly
result = list_tags_command(instruction_db, verbose=True)

AgentSpec provides powerful fuzzy search capabilities for discovering instructions programmatically.
from agentspec import InstructionDatabase, SearchEngineFactory
# Create search engine
db = InstructionDatabase()
engine = SearchEngineFactory.create_default_engine(db)
# Search for instructions
results = engine.search("testing framework")
for match in results.matches:
print(f"{match.instruction.id}: {match.overall_score:.2f}")Factory class for creating configured search engines.
from agentspec import SearchEngineFactory, InstructionDatabase
class SearchEngineFactory:
@staticmethod
def create_default_engine(instruction_db: InstructionDatabase, **config_overrides) -> FuzzySearchEngine:
"""Create a search engine with default configuration."""
@staticmethod
def create_fast_engine(instruction_db: InstructionDatabase, **config_overrides) -> FuzzySearchEngine:
"""Create a search engine optimized for speed."""
@staticmethod
def create_accurate_engine(instruction_db: InstructionDatabase, **config_overrides) -> FuzzySearchEngine:
"""Create a search engine optimized for accuracy."""# Default engine (balanced speed and accuracy)
engine = SearchEngineFactory.create_default_engine(db)
# Fast engine (prioritizes speed)
fast_engine = SearchEngineFactory.create_fast_engine(db)
# Accurate engine (prioritizes accuracy)
accurate_engine = SearchEngineFactory.create_accurate_engine(db)
# Custom configuration
custom_engine = SearchEngineFactory.create_default_engine(
db,
similarity_threshold=0.5,
max_results=25
)

Main search engine class providing comprehensive search capabilities.
from agentspec import FuzzySearchEngine, SearchConfig
class FuzzySearchEngine:
def __init__(self, instruction_db: InstructionDatabase, config: Optional[SearchConfig] = None):
"""Initialize the fuzzy search engine."""
def search(self, query: str, filters: Optional[SearchFilters] = None) -> SearchResults:
"""Perform fuzzy search across all instruction attributes."""
def search_by_attribute(self, query: str, attribute: str, filters: Optional[SearchFilters] = None) -> SearchResults:
"""Search within a specific attribute."""
def suggest_completions(self, partial_query: str) -> List[str]:
"""Provide real-time search suggestions."""# Simple search
results = engine.search("testing framework")
print(f"Found {len(results.matches)} matches in {results.execution_time:.3f}s")
for match in results.matches:
print(f"- {match.instruction.id} (score: {match.overall_score:.2f})")
for attr, detail in match.matched_attributes.items():
print(f" {attr}: {detail.matched_text} ({detail.similarity_score:.2f})")from agentspec import SearchFilters
# Create filters
filters = SearchFilters(
categories=["testing", "frontend"],
tags=["react", "jest"],
min_confidence=0.5,
max_results=10
)
# Search with filters
results = engine.search("unit testing", filters)# Search only in tags
results = engine.search_by_attribute("testing", "tags")
# Search only in content
results = engine.search_by_attribute("best practices", "content")
# Search only in categories
results = engine.search_by_attribute("frontend", "category")# Get suggestions for partial queries
suggestions = engine.suggest_completions("test")
print("Suggestions:", suggestions)
# Example output: ["testing", "test automation", "test driven development"]

from agentspec import SearchConfig
# Custom search configuration
config = SearchConfig(
similarity_threshold=0.4, # Minimum similarity score
max_results=50, # Maximum results to return
enable_suggestions=True, # Enable search suggestions
algorithms=['levenshtein', 'fuzzy_ratio'], # Similarity algorithms
attribute_weights={ # Attribute importance weights
'id': 1.0,
'tags': 0.9,
'content': 0.7,
'category': 0.8
},
enable_cache=True, # Enable result caching
cache_size=100, # Cache size
cache_ttl_seconds=300 # Cache TTL (5 minutes)
)
engine = FuzzySearchEngine(db, config)

from agentspec import SearchResults, SearchMatch, MatchDetail
# SearchResults structure
class SearchResults:
query: str # Original search query
matches: List[SearchMatch] # List of matching instructions
total_results: int # Total number of matches found
execution_time: float # Search execution time in seconds
suggestions: List[str] # Query suggestions
# SearchMatch structure
class SearchMatch:
instruction_id: str # Instruction identifier
instruction: Instruction # Full instruction object
matched_attributes: Dict[str, MatchDetail] # Attribute match details
overall_score: float # Overall relevance score
# MatchDetail structure
class MatchDetail:
attribute: str # Matched attribute name
matched_text: str # Text that matched
similarity_score: float # Similarity score (0.0-1.0)
algorithm_used: str # Algorithm used for matching
highlighted_text: str # Text with highlights

results = engine.search("api security")
# Access basic information
print(f"Query: {results.query}")
print(f"Found {len(results.matches)} matches")
print(f"Execution time: {results.execution_time:.3f}s")
# Process matches
for match in results.matches:
instruction = match.instruction
print(f"\nInstruction: {instruction.id}")
print(f"Overall Score: {match.overall_score:.2f}")
print(f"Tags: {', '.join(instruction.tags)}")
# Show match details
for attr_name, detail in match.matched_attributes.items():
print(f" {attr_name}: {detail.matched_text} ({detail.similarity_score:.2f})")
# Use suggestions
if results.suggestions:
print(f"Suggestions: {', '.join(results.suggestions)}")# Update search index when instructions change
engine.update_instruction(modified_instruction)
# Batch update multiple instructions
engine.update_instructions_batch(instruction_dict)
# Rebuild entire index
engine.rebuild_index()

# Validate index consistency
validation = engine.validate_index_consistency()
if not validation['valid']:
print("Index issues:", validation['issues'])
# Get cache statistics
stats = engine.get_cache_stats()
print(f"Cache size: {stats['size']}/{stats['max_size']}")from agentspec import SearchError, InvalidQueryError, IndexNotBuiltError
try:
results = engine.search("my query")
except InvalidQueryError as e:
print(f"Invalid query: {e}")
except IndexNotBuiltError as e:
print(f"Index not built: {e}")
engine.rebuild_index()
except SearchError as e:
print(f"Search error: {e}")from agentspec.utils.config import ConfigManager
manager = ConfigManager()
config = manager.load_config()
value = manager.get_config_value("agentspec.paths.instructions")

import os
from agentspec.utils.logging import setup_logging
# Basic setup (console only)
setup_logging(log_level="INFO")
# Enable debug logging with file output
os.environ["AGENTSPEC_DEBUG_LOGGING"] = "1"
setup_logging(
log_level="DEBUG",
structured=True
)
# Custom log file (overrides environment-based logging)
setup_logging(
log_level="DEBUG",
log_file="custom_agentspec.log",
structured=True
)

Note: File logging is only enabled when the AGENTSPEC_DEBUG_LOGGING environment variable is set, or when a custom log_file is specified. Log files are stored in the AgentSpec installation directory to avoid cluttering user workspaces.
AgentSpec provides comprehensive coverage integration utilities for measurement, reporting, and optimization:
from agentspec.utils.coverage_checker import CoverageChecker
# Initialize with thresholds
checker = CoverageChecker({
'global_threshold': 80,
'branch_threshold': 75,
'file_thresholds': {
'agentspec/core/critical.py': 95
}
})
# Check coverage and enforce thresholds
from coverage import Coverage
coverage = Coverage()
coverage.start()
# Run tests here
coverage.stop()
coverage.save()
passed = checker.check_coverage(coverage)
if not passed:
print("Coverage check failed!")from agentspec.utils.coverage_reporter import RichCoverageReporter
# Generate enhanced terminal reports
reporter = RichCoverageReporter(coverage)
total_coverage = reporter.report()
# Generate HTML reports with custom styling
from agentspec.utils.html_coverage_reporter import CustomHtmlReporter
html_reporter = CustomHtmlReporter(coverage, config)
html_reporter.generate_report()

from agentspec.utils.coverage_analyzer import CoverageAnalyzer
# Analyze coverage quality and trends
analyzer = CoverageAnalyzer(coverage_data)
quality_metrics = analyzer.analyze_quality()
trend_data = analyzer.analyze_trends()
recommendations = analyzer.get_optimization_suggestions()

from agentspec.utils.coverage_uploader import CoverageUploader, CoverageServiceConfig
# Configure services
config = CoverageServiceConfig(
codecov_token="your-codecov-token",
coveralls_token="your-coveralls-token",
github_token="your-github-token"
)
# Upload to external services
uploader = CoverageUploader(config)
results = uploader.upload_all()
# Generate coverage badge
badge_svg = uploader.generate_badge(coverage_percent)

from agentspec.utils.coverage_optimizer import CoverageOptimizer
from agentspec.utils.parallel_coverage import ParallelCoverageManager
# Optimize coverage measurement
optimizer = CoverageOptimizer({
'selective_tracking': True,
'memory_optimization': True
})
optimized_coverage = optimizer.create_optimized_coverage(['agentspec'])
# Parallel coverage execution
manager = ParallelCoverageManager({'max_workers': 4})
merged_coverage = manager.run_parallel_coverage(test_files, ['agentspec'])

from agentspec.utils.coverage_monitor import CoverageMonitor
from agentspec.utils.coverage_dashboard import CoverageDashboard
# Set up monitoring
monitor = CoverageMonitor({
'regression_threshold': 5.0,
'alert_channels': ['email', 'slack']
})
monitor.start_monitoring()
# Generate dashboard
dashboard = CoverageDashboard(coverage_history)
dashboard.generate_html_dashboard('coverage_dashboard.html')

@dataclass
class Instruction:
id: str
version: str
tags: List[str]
content: str
conditions: Optional[List[Condition]] = None
parameters: Optional[List[Parameter]] = None
dependencies: Optional[List[str]] = None
metadata: Optional[InstructionMetadata] = None
language_variants: Optional[Dict[str, LanguageVariant]] = None

@dataclass
class Template:
id: str
name: str
description: str
version: str
project_type: str
technology_stack: List[str]
default_tags: List[str]
required_instructions: List[str] = field(default_factory=list)
optional_instructions: List[str] = field(default_factory=list)
excluded_instructions: List[str] = field(default_factory=list)
parameters: Dict[str, TemplateParameter] = field(default_factory=dict)
inheritance: Optional[TemplateInheritance] = None
conditions: List[TemplateCondition] = field(default_factory=list)
metadata: Optional[TemplateMetadata] = None

@dataclass
class ProjectContext:
project_path: str
project_type: ProjectType
technology_stack: TechnologyStack
dependencies: List[Dependency] = field(default_factory=list)
file_structure: FileStructure = field(default_factory=FileStructure)
git_info: Optional[GitInfo] = None
confidence_score: float = 0.0
metadata: Dict[str, Any] = field(default_factory=dict)

from agentspec.core import (
InstructionDatabase, TemplateManager,
ContextDetector, SpecGenerator, SpecConfig
)
# Initialize components
instruction_db = InstructionDatabase()
template_manager = TemplateManager()
context_detector = ContextDetector()
spec_generator = SpecGenerator(
instruction_db=instruction_db,
template_manager=template_manager,
context_detector=context_detector
)
# Analyze project
project_context = context_detector.analyze_project("./my-project")
print(f"Detected: {project_context.project_type.value}")
# Get template recommendations
recommendations = template_manager.get_recommended_templates({
"project_type": project_context.project_type.value,
"technology_stack": [fw.name for fw in project_context.technology_stack.frameworks]
})
# Use best template
if recommendations:
template = recommendations[0].template
config = spec_generator.apply_template(template, project_context)
else:
# Manual configuration
config = SpecConfig(
selected_tags=["frontend", "testing", "security"],
project_context=project_context
)
# Generate specification
spec = spec_generator.generate_spec(config)
# Validate and export
validation = spec_generator.validate_spec(spec)
if validation.is_valid:
spec_generator.export_spec(spec, "project_spec.md")
print("Specification generated successfully!")
else:
print(f"Validation errors: {validation.errors}")from agentspec.core.instruction_database import (
Instruction, InstructionMetadata, Condition, Parameter
)
# Create custom instruction
custom_instruction = Instruction(
id="custom_react_testing",
version="1.0.0",
tags=["react", "testing", "custom"],
content="Implement comprehensive React testing with {test_framework}.",
conditions=[
Condition(
type="technology",
value="react",
operator="equals"
)
],
parameters=[
Parameter(
name="test_framework",
type="string",
default="jest",
description="Testing framework to use"
)
],
metadata=InstructionMetadata(
category="testing",
priority=8,
author="custom_author"
)
)
# Validate instruction
db = InstructionDatabase()
result = db.validate_instruction(custom_instruction)
if result.is_valid:
print("Custom instruction is valid!")All AgentSpec APIs use consistent error handling patterns:
from agentspec.core.instruction_database import InstructionDatabase
from agentspec.core.exceptions import AgentSpecError, ValidationError
try:
db = InstructionDatabase()
instructions = db.load_instructions()
except FileNotFoundError as e:
print(f"Instructions directory not found: {e}")
except ValidationError as e:
print(f"Validation failed: {e}")
except AgentSpecError as e:
print(f"AgentSpec error: {e}")
except Exception as e:
print(f"Unexpected error: {e}")For large instruction databases or project analysis:
# Use lazy loading
db = InstructionDatabase()
# Instructions loaded on first access
instructions = db.load_instructions()
# Cache results for repeated queries
cached_instructions = db.get_by_tags(["frontend"])

# Clear caches when needed
db.reload() # Reloads from files
template_manager.reload() # Reloads templates

AgentSpec components are thread-safe for read operations:
import threading
def analyze_project(path):
detector = ContextDetector()
return detector.analyze_project(path)
# Safe to run concurrently
threads = [
threading.Thread(target=analyze_project, args=(path,))
for path in project_paths
]

This API documentation provides comprehensive coverage of AgentSpec's Python interface. For more examples and advanced usage patterns, see the examples directory in the repository.