diff --git a/.github/workflows/ai-review.yml b/.github/workflows/ai-review.yml
new file mode 100644
index 0000000..d4f3944
--- /dev/null
+++ b/.github/workflows/ai-review.yml
@@ -0,0 +1,468 @@
+name: GitHub Models PR Review
+
+on:
+ pull_request:
+ branches:
+ - init-proj
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pull-requests: write
+ issues: write
+
+jobs:
+ pr_review:
+ name: AI-Powered PR Review with GitHub Models
+ runs-on: ubuntu-latest
+ if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
+
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Get PR diff
+ run: |
+ git fetch origin ${{ github.base_ref }}
+ git diff origin/${{ github.base_ref }}...HEAD > pr_diff.txt
+
+ - name: Smart file prioritization with 6700 token limit
+ run: |
+ DIFF_SIZE=$(wc -c < pr_diff.txt)
+ ESTIMATED_TOKENS=$((DIFF_SIZE / 4))
+
+ # Token limits optimized for gpt-4.1-nano
+ MAX_DIFF_TOKENS=6700
+ SAFE_DIFF_TOKENS=6000 # Conservative buffer
+
+ echo "Original diff size: $DIFF_SIZE bytes (~$ESTIMATED_TOKENS tokens)"
+ echo "Max allowed tokens: $MAX_DIFF_TOKENS"
+
+ if [ $ESTIMATED_TOKENS -gt $MAX_DIFF_TOKENS ]; then
+ echo "⚠️ Diff exceeds token limits, applying smart file prioritization..."
+
+ # Get changed files with detailed analysis
+ git diff origin/${{ github.base_ref }}...HEAD --name-only > all_changed_files.txt
+
+ # Advanced file priority and size analysis
+ > priority_analysis.txt
+ while read -r file; do
+ if [ -n "$file" ] && [ -f "$file" ]; then
+ FILE_DIFF_SIZE=$(git diff origin/${{ github.base_ref }}...HEAD -- "$file" | wc -c)
+ FILE_TOKENS=$((FILE_DIFF_SIZE / 4))
+
+ # Advanced priority scoring system
+ PRIORITY=5 # Default lowest priority
+ CATEGORY="Other"
+
+ # Critical priority (1) - Core application files
+ if echo "$file" | grep -qE '\.(js|jsx|ts|tsx|py|java|go|rs|php|rb|swift|kt|scala|cs|cpp|c|h|hpp)$' && ! echo "$file" | grep -qE '(test|spec|mock)'; then
+ PRIORITY=1
+ CATEGORY="Core Code"
+ # High priority (2) - Configuration and security
+ elif echo "$file" | grep -qE 'package\.json|package-lock\.json|requirements\.txt|Dockerfile|docker-compose\.yml|\.env|\.env\.|config\.|\.config|\.yml$|\.yaml$|\.toml$|\.ini$|\.conf$|Makefile|CMakeLists\.txt'; then
+ PRIORITY=2
+ CATEGORY="Configuration"
+ # Medium-High priority (3) - Tests and critical docs
+                elif echo "$file" | grep -qE '\.(test|spec)\.|_test\.py$|test_.*\.py$|README|CHANGELOG|LICENSE'; then
+ PRIORITY=3
+ CATEGORY="Tests/Docs"
+ # Medium priority (4) - Styles and medium docs
+ elif echo "$file" | grep -qE '\.(css|scss|sass|less|styl|md|rst|txt)$'; then
+ PRIORITY=4
+ CATEGORY="Styles/Docs"
+ fi
+
+ echo "$PRIORITY:$FILE_TOKENS:$CATEGORY:$file" >> priority_analysis.txt
+ fi
+ done < all_changed_files.txt
+
+ # Sort by priority (1=critical, 5=low) then by size (smaller first for better coverage)
+ sort -t: -k1,1n -k2,2n priority_analysis.txt > sorted_files.txt
+
+ # Build focused diff with smart token management
+ echo "# Focused Code Review - Smart File Prioritization" > focused_diff.txt
+ echo "" >> focused_diff.txt
+ echo "**Analysis Strategy:** Prioritizing critical code files, configurations, and tests within token constraints." >> focused_diff.txt
+ echo "" >> focused_diff.txt
+
+ CURRENT_TOKENS=150 # Account for headers and metadata
+ FILES_INCLUDED=0
+ declare -A CATEGORY_COUNT
+
+ echo "## Files Included in Review:" >> focused_diff.txt
+ echo "" >> focused_diff.txt
+
+ while IFS=':' read -r priority tokens category filepath; do
+ if [ -n "$filepath" ] && [ $((CURRENT_TOKENS + tokens)) -lt $SAFE_DIFF_TOKENS ]; then
+ echo "### $category: \`$filepath\`" >> focused_diff.txt
+ echo "**Priority:** $priority | **Estimated Impact:** $tokens tokens" >> focused_diff.txt
+ echo "" >> focused_diff.txt
+ git diff origin/${{ github.base_ref }}...HEAD -- "$filepath" >> focused_diff.txt
+ echo "" >> focused_diff.txt
+ echo "---" >> focused_diff.txt
+ echo "" >> focused_diff.txt
+
+ CURRENT_TOKENS=$((CURRENT_TOKENS + tokens))
+ FILES_INCLUDED=$((FILES_INCLUDED + 1))
+ CATEGORY_COUNT[$category]=$((${CATEGORY_COUNT[$category]} + 1))
+
+ echo "✓ Included $filepath [$category] ($tokens tokens, running total: $CURRENT_TOKENS)"
+ else
+ echo "✗ Skipped $filepath [$category] ($tokens tokens) - would exceed limit"
+ fi
+ done < sorted_files.txt
+
+ # Add summary footer
+ echo "" >> focused_diff.txt
+ echo "## Review Scope Summary" >> focused_diff.txt
+ echo "| Category | Files Reviewed |" >> focused_diff.txt
+ echo "|----------|----------------|" >> focused_diff.txt
+ for category in "${!CATEGORY_COUNT[@]}"; do
+ echo "| $category | ${CATEGORY_COUNT[$category]} |" >> focused_diff.txt
+ done
+ echo "" >> focused_diff.txt
+ echo "**Total Files:** $FILES_INCLUDED | **Token Usage:** ~$CURRENT_TOKENS/$MAX_DIFF_TOKENS" >> focused_diff.txt
+
+ mv focused_diff.txt pr_diff.txt
+
+ NEW_SIZE=$(wc -c < pr_diff.txt)
+ NEW_TOKENS=$((NEW_SIZE / 4))
+ echo "✅ Prioritized diff: $NEW_SIZE bytes (~$NEW_TOKENS tokens)"
+
+ else
+ echo "✅ Diff size within limits, proceeding with full review"
+ fi
+
+ # Final safety check with hard limits
+ FINAL_SIZE=$(wc -c < pr_diff.txt)
+ FINAL_TOKENS=$((FINAL_SIZE / 4))
+
+ if [ $FINAL_TOKENS -gt $SAFE_DIFF_TOKENS ]; then
+ echo "⚠️ Applying final safety truncation..."
+ # Hard limit: 5000 tokens * 4 chars/token = 20000 chars
+ head -c 20000 pr_diff.txt > truncated.txt
+ echo "" >> truncated.txt
+ echo "--- DIFF TRUNCATED: REACHED SAFE TOKEN LIMIT ---" >> truncated.txt
+ echo "**Remaining files require separate review**" >> truncated.txt
+ mv truncated.txt pr_diff.txt
+ echo "Safety truncated to: $(wc -c < pr_diff.txt) bytes"
+ fi
+
+ # Cleanup temporary files
+ rm -f all_changed_files.txt priority_analysis.txt sorted_files.txt
+
+ - name: Review PR with GitHub Models (GPT-4.1-nano)
+ id: review_with_github_models
+ if: github.event_name == 'pull_request'
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+
+ let prDiff = '';
+ try {
+ prDiff = fs.readFileSync('pr_diff.txt', 'utf8');
+ } catch (error) {
+ console.log('No diff file found or empty diff');
+ return;
+ }
+
+ if (!prDiff.trim()) {
+ console.log('Empty diff, skipping review');
+ return;
+ }
+
+ // Comprehensive prompt with code example requirements
+ const prompt = `You are an expert code reviewer. Analyze the provided git diff and deliver a comprehensive, professional code review following the exact structure below.
+
+ ### FORMATTING REQUIREMENTS:
+ - Use proper markdown with clear sections
+ - Include specific code snippets with language tags
+ - Provide concrete examples for improvements
+ - Use tables for structured findings like example :
+
+ 📂 Click to expand issue table
+ | Category | Issue Description | Location (File:Line) | Severity | Recommendation |
+ |----------|-------------------|----------------------|----------|----------------|
+ | Example | Description here | file.js:42 | High | Specific fix |
+
+ - Reference specific file locations
+ - Professional tone, no emojis
+
+ ### REQUIRED STRUCTURE:
+
+ ## Code Review Summary
+ Brief overview of changes and overall quality assessment.
+
+ ## Critical Issues
+ List high-priority issues requiring immediate attention with clear impact explanations.
+
+ ## Code Quality Analysis
+
+ ### Security Concerns
+ Identify security issues with code examples and explanations.
+
+ ### Performance Issues
+ Highlight performance problems with optimization suggestions.
+
+ ### Best Practices
+ Note coding standard violations and improvement opportunities.
+
+ ## Detailed Findings
+
+
+ 📂 Click to expand issue table
+
+ | Category | Issue Description | Location (File:Line) | Severity | Recommendation |
+ |----------|-------------------|----------------------|----------|----------------|
+ | Example | Description here | file.js:42 | High | Specific fix |
+
+
+
+ ## Code Examples
+
+ ### Current Implementation
+ Show problematic code snippets with explanations of why they're issues.
+
+ \`\`\`javascript
+ // Current problematic code
+ const example = "show actual code from diff";
+ \`\`\`
+
+ **Issue:** Explain what's wrong with this code.
+
+ ### Suggested Improvements
+ Present corrected versions with detailed explanations:
+
+ \`\`\`javascript
+ // Improved version
+ const betterExample = "show how to fix it";
+ // Add validation, error handling, etc.
+ \`\`\`
+
+ **Improvement:** Explain why this is better and what benefits it provides.
+
+ ## Testing Recommendations
+ Specific test suggestions for the changes.
+
+ ## Documentation Notes
+ Documentation improvements or additions needed.
+
+ ---
+
+ Here is the diff to review:
+ \`\`\`diff
+ ${prDiff}
+ \`\`\``;
+
+ try {
+ const response = await fetch('https://models.github.ai/inference/chat/completions', {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${{ secrets.GH_PAT_MODELS }}`,
+ 'Content-Type': 'application/json',
+ 'User-Agent': 'GitHub-Actions-PR-Review/1.0'
+ },
+ body: JSON.stringify({
+ messages: [
+ {
+ role: "system",
+ content: "You are a senior software engineer conducting thorough code reviews. Provide detailed, actionable feedback with proper markdown formatting, concrete code examples, and structured recommendations. Focus on security, performance, maintainability, and best practices. Always include before/after code examples for suggested improvements."
+ },
+ {
+ role: "user",
+ content: prompt
+ }
+ ],
+ model: "gpt-4.1-nano",
+ temperature: 0.1,
+ max_tokens: 4000
+ })
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+
+ // Graceful fallback for token limits or API errors
+ if (response.status === 413 || errorText.includes('tokens_limit_reached') || errorText.includes('too large')) {
+ console.log('Triggering graceful fallback due to size limits...');
+
+ // Create comprehensive fallback review
+ const changedFiles = prDiff.split('\n')
+ .filter(line => line.startsWith('diff --git'))
+ .map(line => {
+                  const match = line.match(/ b\/(.+)$/);
+ return match ? match[1] : 'Unknown';
+ })
+ .slice(0, 15);
+
+              const addedLines = prDiff.split('\n').filter(line => line.startsWith('+') && !line.startsWith('+++')).length;
+              const removedLines = prDiff.split('\n').filter(line => line.startsWith('-') && !line.startsWith('---')).length;
+
+ const fallbackReview = `## AI Code Review - Large Diff Analysis
+
+ This pull request contains extensive changes that exceed optimal token limits for detailed AI analysis. However, here's a comprehensive review strategy:
+
+ ### Change Overview
+ - **Files Modified:** ${changedFiles.length}${changedFiles.length >= 15 ? '+' : ''}
+ - **Lines Added:** ~${addedLines}
+ - **Lines Removed:** ~${removedLines}
+ - **Diff Size:** ${Math.floor(prDiff.length / 1024)}KB (~${Math.floor(prDiff.length / 4)} tokens)
+
+ ### Files Requiring Manual Review
+
+ #### High Priority Files
+ ${changedFiles.filter(f => f.match(/\.(js|jsx|ts|tsx|py|java|go|rs|php|rb)$/)).map(f => `- \`${f}\` - Core application logic`).join('\n')}
+
+ #### Configuration Files
+ ${changedFiles.filter(f => f.match(/package\.json|requirements\.txt|Dockerfile|\.env|config/)).map(f => `- \`${f}\` - System configuration`).join('\n')}
+
+ #### Other Files
+ ${changedFiles.filter(f => !f.match(/\.(js|jsx|ts|tsx|py|java|go|rs|php|rb)$/) && !f.match(/package\.json|requirements\.txt|Dockerfile|\.env|config/)).map(f => `- \`${f}\``).join('\n')}
+
+ ### Recommended Manual Review Checklist
+
+ #### 🔒 Security Review
+ - [ ] Check for hardcoded secrets, API keys, or passwords
+ - [ ] Validate input sanitization and XSS prevention
+ - [ ] Review authentication and authorization changes
+ - [ ] Examine SQL queries for injection vulnerabilities
+ - [ ] Verify secure communication protocols
+
+ #### ⚡ Performance Review
+ - [ ] Look for inefficient database queries or N+1 problems
+ - [ ] Check for memory leaks in loops or event handlers
+ - [ ] Review caching strategies and implementation
+ - [ ] Examine bundle size impact for frontend changes
+ - [ ] Validate algorithm complexity for data processing
+
+ #### 🧪 Testing Requirements
+ - [ ] Ensure unit tests cover new functionality
+ - [ ] Add integration tests for API changes
+ - [ ] Update end-to-end tests for UI modifications
+ - [ ] Verify edge cases and error scenarios are tested
+ - [ ] Check test coverage metrics
+
+ #### 📚 Code Quality Standards
+ - [ ] Consistent naming conventions and code style
+ - [ ] Proper error handling and logging
+ - [ ] Documentation and comments for complex logic
+ - [ ] Dependencies are necessary and up-to-date
+ - [ ] No dead code or unused imports
+
+ #### 🔄 Breaking Changes Assessment
+ - [ ] API compatibility with existing integrations
+ - [ ] Database migration safety and rollback plans
+ - [ ] Configuration changes deployment strategy
+ - [ ] Backward compatibility for public interfaces
+
+ ### Suggested Review Strategy
+
+ 1. **Start with Critical Files:** Focus on core business logic and security-sensitive components
+ 2. **Review in Small Chunks:** Break down the review by functional area or file type
+ 3. **Use Static Analysis:** Run linters, security scanners, and code quality tools
+ 4. **Test Thoroughly:** Deploy to staging environment and run comprehensive tests
+ 5. **Pair Review:** Consider pair programming sessions for complex changes
+
+ ### Next Steps
+ - Consider breaking this large PR into smaller, focused pull requests
+ - Run automated security and quality scans
+ - Schedule dedicated review sessions with team members
+ - Ensure proper staging environment testing before merge
+
+ ---
+ **Review Status:** Manual review required due to diff complexity
+ **Generated:** ${new Date().toLocaleString()}
+ **Fallback Reason:** ${response.status === 413 ? 'Token limit exceeded' : 'API error encountered'}
+
+ > 💡 **Tip:** For better AI reviews in the future, consider smaller, more focused pull requests (< 1000 lines changed).`;
+
+ await github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: fallbackReview
+ });
+
+ console.log('✅ Graceful fallback review posted successfully');
+ return;
+ }
+
+ throw new Error(`GitHub Models API error: ${response.status} ${response.statusText} - ${errorText}`);
+ }
+
+ const data = await response.json();
+ const reviewContent = data.choices[0].message.content;
+
+ // Post the review content directly without metadata wrapper
+ await github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: reviewContent
+ });
+
+ console.log('✅ Comprehensive PR review posted successfully');
+
+ } catch (error) {
+ console.error('Error in GitHub Models API call:', error);
+
+ // Enhanced error reporting
+ const errorReport = `## ❌ AI Code Review - Error Occurred
+
+ An error occurred while generating the automated code review.
+
+ ### Error Details
+ \`\`\`
+ ${error.message}
+ \`\`\`
+
+ ### Manual Review Required
+ Please proceed with manual code review using these guidelines:
+
+ #### Quick Review Checklist
+ - **Security:** Check for vulnerabilities and exposed credentials
+ - **Performance:** Look for inefficient code patterns
+ - **Testing:** Ensure adequate test coverage
+ - **Documentation:** Verify code is properly documented
+ - **Standards:** Confirm adherence to team coding standards
+
+ #### Troubleshooting
+ - Check workflow logs for detailed error information
+ - Verify GitHub Models API token permissions
+ - Consider reducing diff size if token limits were exceeded
+
+ ---
+ **Error Time:** ${new Date().toLocaleString()}
+ **Suggested Action:** Manual review and investigate workflow configuration`;
+
+ await github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: errorReport
+ });
+
+ core.setFailed(`Failed to generate AI review: ${error.message}`);
+ }
+
+ - name: Display final diff statistics
+ if: always()
+ run: |
+ if [ -f pr_diff.txt ]; then
+ FINAL_SIZE=$(wc -c < pr_diff.txt)
+ FINAL_TOKENS=$((FINAL_SIZE / 4))
+ LINES=$(wc -l < pr_diff.txt)
+
+ echo "=== FINAL DIFF STATISTICS ==="
+ echo "📊 Size: $FINAL_SIZE bytes"
+ echo "🎯 Estimated tokens: $FINAL_TOKENS"
+ echo "📝 Lines: $LINES"
+ echo "✅ Token limit compliance: $([ $FINAL_TOKENS -le 6700 ] && echo "PASSED" || echo "EXCEEDED")"
+ echo "=========================="
+ else
+ echo "❌ No diff file found"
+ fi
\ No newline at end of file
diff --git a/.github/workflows/static-analysis.yml b/.github/workflows/static-analysis.yml
new file mode 100644
index 0000000..66b8818
--- /dev/null
+++ b/.github/workflows/static-analysis.yml
@@ -0,0 +1,256 @@
+#==============================================================================
+# STATIC ANALYSIS WORKFLOW
+#==============================================================================
+# This workflow performs automated static code analysis on Go and JavaScript/JSX
+# files when pull requests are made to the dev-review-setup branch.
+#
+# Key Features:
+# - Intelligent path filtering to only analyze changed directories
+# - Dynamic matrix strategy for parallel execution across multiple projects
+# - Integration with reviewdog for inline PR comments
+# - Automatic dependency management for both Go and JS projects
+# - Concurrency control to prevent resource conflicts
+#==============================================================================
+
+name: Static Analysis
+
+# permissions:
+# contents: read
+# pull-requests: write
+# checks: write
+# statuses: write
+
+permissions: write-all
+
+
+#==============================================================================
+# WORKFLOW TRIGGERS
+#==============================================================================
+# This workflow is triggered on pull request events targeting the
+# dev-review-setup branch. It only runs when specific file types are modified
+# to optimize CI/CD resource usage.
+#==============================================================================
+on:
+ pull_request:
+ branches: [ dev-review-setup ]
+ # Trigger on these PR events to catch all code changes
+ types: [opened, synchronize, reopened, edited]
+ # Path-based filtering to prevent unnecessary runs
+ paths:
+ - '**.go' # Go source files
+ - '**.js' # JavaScript files
+ - '**.jsx' # React JSX files
+      - '.github/workflows/static-analysis.yml'  # This workflow file itself
+
+#==============================================================================
+# CONCURRENCY CONTROL
+#==============================================================================
+# Prevents multiple instances of this workflow from running simultaneously
+# on the same PR or branch, canceling older runs to save resources
+#==============================================================================
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.sha }}
+ cancel-in-progress: true
+
+jobs:
+ #============================================================================
+ # JOB 1: CHANGE DETECTION AND MATRIX PREPARATION
+ #============================================================================
+ # This job determines which directories contain changed files and builds
+ # dynamic matrices for the subsequent linting jobs. This approach ensures
+ # we only run static analysis on projects that have actual changes.
+ #============================================================================
+ filter:
+ name: Determine Changed Directories
+ runs-on: ubuntu-latest
+
+ # Output matrices that will be consumed by downstream jobs
+ outputs:
+ go_matrix: ${{ steps.matrix_builder.outputs.go_matrix }} # Array of Go project directories
+ js_matrix: ${{ steps.matrix_builder.outputs.js_matrix }} # Array of JS project directories
+
+ steps:
+ #------------------------------------------------------------------------
+ # Checkout the repository code to analyze file changes
+ #------------------------------------------------------------------------
+ - name: Check out code
+ uses: actions/checkout@v4
+
+ #------------------------------------------------------------------------
+ # Use dorny/paths-filter to detect changes in specific directory patterns
+ # This action compares the current PR against the target branch
+ #------------------------------------------------------------------------
+ - name: Use paths-filter action
+ uses: dorny/paths-filter@v3
+ id: changes
+ with:
+ # Return list of changed files as JSON for processing
+ list-files: 'json'
+ filters: |
+ go:
+ - 'static-code-go/**'
+ - 'echo-mysql/**'
+ - 'echo-sql/**'
+ - 'fasthttp-postgres/**'
+ - 'gin-mongo/**'
+ - 'gin-redis/**'
+ - 'go-grpc/**'
+ - 'go-jwt/**'
+ - 'go-twilio/**'
+ - 'graphql-sql/**'
+ - 'http-pokeapi/**'
+ - 'mux-elasticsearch/**'
+ - 'mux-mysql/**'
+ - 'mux-sql/**'
+ - 'S3-Keploy/**'
+ - 'sse-svelte/**'
+ - 'users-profile/**'
+ - 'book-store-inventory/**'
+
+ js:
+ - 'static-code-js/**'
+
+ #------------------------------------------------------------------------
+ # Build dynamic matrices from the changed file lists
+ # This script extracts unique directory names from changed file paths
+ # and formats them as JSON arrays for use in matrix strategies
+ # This reduces unnecessary runs and speeds up CI/CD and saves a lot of resources
+ #------------------------------------------------------------------------
+ - name: Build Matrix from changed files
+ id: matrix_builder
+ if: steps.changes.outputs.go == 'true' || steps.changes.outputs.js == 'true'
+ run: |
+ # Process Go files: extract root directory names and create unique JSON array
+ # Example: "echo-mysql/main.go" -> "echo-mysql"
+ go_dirs=$(echo '${{ steps.changes.outputs.go_files }}' | jq -r '[.[] | split("/")[0]] | unique | tojson')
+ echo "go_matrix=$go_dirs" >> $GITHUB_OUTPUT
+
+ # Process JavaScript files: extract root directory names and create unique JSON array
+ # Example: "frontend/src/app.js" -> "frontend"
+ js_dirs=$(echo '${{ steps.changes.outputs.js_files }}' | jq -r '[.[] | split("/")[0]] | unique | tojson')
+ echo "js_matrix=$js_dirs" >> $GITHUB_OUTPUT
+
+ #============================================================================
+ # JOB 2: GO STATIC ANALYSIS
+ #============================================================================
+ # Runs golangci-lint on each Go project directory that contains changes.
+ # Uses reviewdog to provide inline PR comments for any linting issues found.
+ #============================================================================
+ lint_and_fix_go:
+ name: Go Static Analysis
+ needs: filter # Wait for change detection
+    if: ${{ needs.filter.outputs.go_matrix != '' && needs.filter.outputs.go_matrix != '[]' }}  # Only run if Go changes detected (empty when matrix step was skipped)
+ runs-on: ubuntu-latest
+
+
+ # Matrix strategy: run this job in parallel for each changed Go directory
+ strategy:
+ fail-fast: false # Continue running other matrix jobs even if one fails
+ matrix:
+ working-directory: ${{ fromJSON(needs.filter.outputs.go_matrix) }}
+
+ steps:
+ #------------------------------------------------------------------------
+ # Checkout with full git history for accurate diff analysis
+ #------------------------------------------------------------------------
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0 # Full history needed for proper linting context
+
+ #------------------------------------------------------------------------
+ # Ensure Go module is properly initialized and dependencies are resolved
+ # This step handles cases where go.mod might be missing or outdated
+ # It is required by reviewdog with Golangci-lint
+ #------------------------------------------------------------------------
+ - name: Ensure Go module exists and tidy
+ run: |
+ # Initialize go.mod if it doesn't exist (defensive programming)
+ if [ ! -f go.mod ]; then
+ go mod init github.com/${{ github.repository }}
+ fi
+ # Resolve and cleanup dependencies
+ go mod tidy
+ working-directory: ./${{ matrix.working-directory }}
+
+ #------------------------------------------------------------------------
+ # Run golangci-lint with reviewdog integration
+ # This provides inline PR comments for any issues found
+ #------------------------------------------------------------------------
+ - name: Run lint and report issues
+ uses: reviewdog/action-golangci-lint@v2
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-review # Post comments on PR
+ workdir: ./${{ matrix.working-directory }}
+ fail_level: warning # Fail on warnings and above
+
+ #============================================================================
+ # JOB 3: JAVASCRIPT/JSX STATIC ANALYSIS
+ #============================================================================
+ # Runs ESLint on each JavaScript/JSX project directory that contains changes.
+ # Automatically sets up Node.js environment and ESLint configuration if needed.
+ #============================================================================
+ lint_js:
+ name: JavaScript Static Analysis
+ needs: filter # Wait for change detection
+    if: ${{ needs.filter.outputs.js_matrix != '' && needs.filter.outputs.js_matrix != '[]' }}  # Only run if JS changes detected (empty when matrix step was skipped)
+ runs-on: ubuntu-latest
+
+
+ # Matrix strategy: run this job in parallel for each changed JS directory
+ strategy:
+ fail-fast: false # Continue running other matrix jobs even if one fails
+ matrix:
+ working-directory: ${{ fromJSON(needs.filter.outputs.js_matrix) }}
+
+ steps:
+ #------------------------------------------------------------------------
+ # Checkout repository code
+ #------------------------------------------------------------------------
+ - uses: actions/checkout@v4
+
+ #------------------------------------------------------------------------
+ # Setup Node.js environment with LTS version
+ # Using version 18 for stability and wide compatibility
+ #------------------------------------------------------------------------
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ #------------------------------------------------------------------------
+ # Ensure ESLint environment is properly configured
+ # This step handles projects that might not have ESLint setup yet
+ #------------------------------------------------------------------------
+ - name: Ensure package.json and eslint.config.mjs exist
+ run: |
+ # Create package.json if missing (defensive programming)
+ if [ ! -f package.json ]; then
+ npm init -y # Create with defaults
+ npm install eslint --save-dev # Install ESLint as dev dependency
+ fi
+
+ # Create basic ESLint configuration if missing
+ # Using flat config format (ESLint 9+) with essential rules
+ if [ ! -f eslint.config.mjs ]; then
+ echo "export default [{ rules: { semi: 'error', quotes: ['error', 'single'] } }];" > eslint.config.mjs
+ fi
+ working-directory: ./${{ matrix.working-directory }}
+
+ #------------------------------------------------------------------------
+ # Run ESLint with reviewdog integration
+ # This provides inline PR comments for any JavaScript/JSX issues found
+ #------------------------------------------------------------------------
+ - name: Run ESLint via reviewdog (PR review)
+ uses: reviewdog/action-eslint@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-review # Post comments on PR
+ # ESLint flags explanation:
+ # --config: Use our generated flat config file
+ # **/*.{js,jsx,ts,tsx}: Lint all JS/JSX/TS/TSX files recursively
+ # --no-error-on-unmatched-pattern: Don't fail if no files match pattern
+ eslint_flags: '--config eslint.config.mjs **/*.{js,jsx,ts,tsx} --no-error-on-unmatched-pattern'
+ fail_level: error
+ workdir: ./${{ matrix.working-directory }}
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..40dfc46
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,26 @@
+linters:
+ enable:
+ - gofmt
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - staticcheck
+ - unused
+ - errcheck
+
+linters-settings:
+ gofmt:
+ simplify: true
+ goimports:
+ local-prefixes: github.com/keploy/code-review-agent
+
+issues:
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - errcheck
+
+run:
+ timeout: 5m
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..85cd465
--- /dev/null
+++ b/README.md
@@ -0,0 +1,232 @@
+# Code Review Agent
+
+
+## Documentation: GitHub Models PR Review Workflow
+
+## Overview
+
+This GitHub Actions workflow is designed to automate **pull request (PR) code reviews** using **GitHub-hosted LLMs** (specifically `gpt-4.1-nano`) via the GitHub Models API. It uses:
+
+* Smart diff file prioritization to manage token limits.
+* Markdown-rich prompts for structured review.
+* Graceful fallbacks for oversized diffs.
+
+## Objective
+
+To enhance pull request quality checks by automatically generating detailed, structured code reviews that:
+
+* Flag security, performance, and maintainability issues
+* Recommend improvements with code snippets
+* Provide clear summaries and checklists
+
+---
+
+## Workflow Triggers
+
+| Trigger | Description |
+| ------------------- | --------------------------------------------- |
+| `pull_request` | Runs on PRs targeting the `init-proj` branch. |
+| `workflow_dispatch` | Allows manual triggering from GitHub UI. |
+
+```yaml
+name: GitHub Models PR Review
+on:
+ pull_request:
+ branches:
+ - init-proj
+ workflow_dispatch:
+```
+
+---
+
+## Permissions
+
+This workflow needs to:
+
+* Read the repo content
+* Comment on PRs (`pull-requests: write`)
+* Create issues if needed
+* A PAT (Personal Access Token) with GitHub Models `read-only` permission, stored in the `GH_PAT_MODELS` secret, for API access
+
+```yaml
+permissions:
+ contents: read
+ pull-requests: write
+ issues: write
+```
+
+---
+
+## Job: `pr_review`
+
+| Property | Value |
+| --------------- | ------------------ |
+| Runs on | `ubuntu-latest` |
+| Conditional Run | PR or manual event |
+
+### Step 1: Checkout
+
+Fetches the full repository history:
+
+```yaml
+- name: Checkout Repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+```
+
+### Step 2: Generate Diff
+
+Creates a diff from base branch to HEAD and saves to `pr_diff.txt`:
+
+```bash
+git fetch origin ${{ github.base_ref }}
+git diff origin/${{ github.base_ref }}...HEAD > pr_diff.txt
+```
+
+---
+
+## Step 3: Smart Diff Prioritization
+
+Handles token limits (OpenAI models have token size limits):
+
+### Logic:
+
+| Condition | Action |
+| ------------------------- | ------------------------------ |
+| Diff within token limit | Use full diff |
+| Diff exceeds token limit | Prioritize critical files only |
+| Still exceeds safe tokens | Truncate file to \~5000 tokens |
+
+### Categories Used:
+
+| Priority | Category | File Types |
+| -------- | ------------- | ---------------------------------- |
+| 1 | Core Code | `.js`, `.py`, `.java`, etc. |
+| 2 | Configuration | `Dockerfile`, `.env`, `.yml`, etc. |
+| 3 | Tests/Docs | `README`, `test_*.py`, etc. |
+| 4 | Styles/Docs | `.css`, `.md`, `.txt` |
+| 5 | Other | Anything else |
+
+### Snippet: File Categorization
+
+```bash
+if echo "$file" | grep -qE '\.(js|jsx|ts|tsx|py|java|...)'; then
+ PRIORITY=1
+ CATEGORY="Core Code"
+```
+
+### Token Calculation (Estimates 1 token ≈ 4 characters):
+
+```bash
+ESTIMATED_TOKENS=$((DIFF_SIZE / 4))
+```
+
+### Prioritized Files Output:
+
+* Sorted by priority, then file size.
+* Only included if total token count stays below `6000`.
+* Review summary added to `focused_diff.txt`
+
+### Final Check:
+
+If total tokens still > 6000:
+
+```bash
+head -c 20000 pr_diff.txt > truncated.txt
+```
+
+---
+
+## Step 4: Review PR with GitHub Models
+
+Uses the GitHub Models API to post a review comment.
+
+### Prompt Structure Sent to Model:
+
+| Section | Content Details |
+| ---------------------------- | ------------------------------------- |
+| `## Code Review Summary` | High-level summary |
+| `## Critical Issues` | Blocking problems |
+| `## Code Quality Analysis` | Security, Performance, Best Practices |
+| `## Detailed Findings` | Tabular issues |
+| `## Code Examples` | Before/After code with explanations |
+| `## Testing Recommendations` | Test ideas |
+| `## Documentation Notes` | Documentation suggestions |
+
+### Model Call Example:
+
+```js
+fetch('https://models.github.ai/inference/chat/completions', {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${{ secrets.GH_PAT_MODELS }}`,
+ ...
+ },
+ body: JSON.stringify({
+ model: "gpt-4.1-nano",
+ messages: [...],
+ temperature: 0.1,
+ max_tokens: 4000
+ })
+})
+```
+
+---
+
+## Fallback: Graceful Failure Handling
+
+If the diff is too large for GPT:
+
+* Parse filenames from diff
+* Show summary stats (lines added/removed, file size)
+* Post fallback message with:
+
+ * Review checklist
+ * Manual review strategy
+
+```markdown
+## AI Code Review - Large Diff Analysis
+...
+- [ ] Check for hardcoded secrets
+- [ ] Optimize database queries
+...
+```
+
+---
+
+## Step 5: Display Final Stats
+
+For debugging and transparency:
+
+```bash
+wc -c < pr_diff.txt # Byte size
+wc -l < pr_diff.txt # Line count
+```
+
+Shows if the final diff obeys the token limit (\~6700 tokens max).
+
+---
+
+## Summary Table
+
+| Feature | Status |
+| ----------------------------- | ------------ |
+| Smart diff prioritization | ✅ Enabled |
+| File-based categorization | ✅ Advanced |
+| Token-safe fallbacks | ✅ Included |
+| Markdown review formatting | ✅ Structured |
+| Graceful API error handling | ✅ Robust |
+| Final logging and diagnostics | ✅ Verbose |
+
+
+---
+
+## Related Files Generated
+
+| File | Description |
+| ----------------------- | --------------------------------- |
+| `pr_diff.txt` | Full or focused diff for LLM |
+| `focused_diff.txt` | Diff after smart prioritization |
+| `sorted_files.txt` | File list sorted by priority/size |
+| `priority_analysis.txt` | Token and category for each file |
diff --git a/code/main.go b/code/main.go
new file mode 100644
index 0000000..e85e253
--- /dev/null
+++ b/code/main.go
@@ -0,0 +1 @@
+package code
diff --git a/go.mod b/go.mod
deleted file mode 100644
index ece20c9..0000000
--- a/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/keploy/code-review-agent
-
-go 1.24.3
diff --git a/js-code/.eslintrc.json b/js-code/.eslintrc.json
new file mode 100644
index 0000000..801b1c1
--- /dev/null
+++ b/js-code/.eslintrc.json
@@ -0,0 +1,19 @@
+
+{
+ "env": {
+ "browser": true,
+ "es2021": true,
+ "node": true
+ },
+ "extends": "eslint:recommended",
+ "parserOptions": {
+ "ecmaVersion": "latest",
+ "sourceType": "module"
+ },
+ "rules": {
+ "no-unused-vars": "warn",
+ "semi": ["error", "always"],
+ "no-console": "off"
+
+ }
+}
\ No newline at end of file
diff --git a/js-code/main.js b/js-code/main.js
new file mode 100644
index 0000000..e69de29
diff --git a/js-code/package.json b/js-code/package.json
new file mode 100644
index 0000000..952023d
--- /dev/null
+++ b/js-code/package.json
@@ -0,0 +1,15 @@
+{
+ "name": "js-code",
+ "version": "1.0.0",
+ "description": "JavaScript code for static analysis",
+ "main": "main.js",
+ "scripts": {
+ "lint": "eslint . --ext .js,.jsx"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "devDependencies": {
+ "eslint": "^8.0.0"
+ }
+}
diff --git a/main.go b/main.go
deleted file mode 100644
index 21ccb8c..0000000
--- a/main.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package codereviewagent
-
-func main(){
- // init codereview agent
-}
\ No newline at end of file