diff --git a/.github/workflows/saist.yml b/.github/workflows/saist.yml
index cf64459..9740951 100644
--- a/.github/workflows/saist.yml
+++ b/.github/workflows/saist.yml
@@ -4,6 +4,20 @@ on:
   pull_request:
     types: [opened, synchronize, reopened]

+  workflow_dispatch:
+    inputs:
+      llm_provider:
+        description: 'LLM Provider'
+        required: true
+        default: 'deepseek'
+        type: choice
+        options:
+          - openai
+          - anthropic
+          - deepseek
+          - gemini
+          - ollama
+
 jobs:
   security-check:
     runs-on: ubuntu-latest
@@ -15,6 +29,8 @@ jobs:
     steps:
       - name: Check out code
         uses: actions/checkout@v3
+        with:
+          fetch-depth: 0

       - name: Set up Python
         uses: actions/setup-python@v4
@@ -25,13 +41,93 @@ jobs:
         run: |
           pip install -r requirements.txt

+      - name: Determine LLM Provider and Scan Mode
+        id: config
+        run: |
+          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
+            LLM_PROVIDER="${{ github.event.inputs.llm_provider }}"
+            SCAN_MODE="filesystem"
+            SCAN_TARGET="."
+            echo "🎯 Manual run: Using $LLM_PROVIDER with filesystem scan"
+          else
+            # PR run - use default provider with proper fallback
+            if [ -n "${{ vars.DEFAULT_LLM_PROVIDER }}" ]; then
+              LLM_PROVIDER="${{ vars.DEFAULT_LLM_PROVIDER }}"
+            else
+              LLM_PROVIDER="deepseek"
+            fi
+            SCAN_MODE="github"
+            SCAN_TARGET=""
+            echo "🔄 PR run: Using $LLM_PROVIDER with github scan"
+          fi
+
+          # Save for next steps
+          echo "llm_provider=$LLM_PROVIDER" >> $GITHUB_OUTPUT
+          echo "scan_mode=$SCAN_MODE" >> $GITHUB_OUTPUT
+          echo "scan_target=$SCAN_TARGET" >> $GITHUB_OUTPUT
+
+          # Set API key secret name
+          case $LLM_PROVIDER in
+            openai) echo "api_key_secret=OPENAI_API_KEY" >> $GITHUB_OUTPUT ;;
+            anthropic) echo "api_key_secret=ANTHROPIC_API_KEY" >> $GITHUB_OUTPUT ;;
+            deepseek) echo "api_key_secret=DEEPSEEK_API_KEY" >> $GITHUB_OUTPUT ;;
+            gemini) echo "api_key_secret=GEMINI_API_KEY" >> $GITHUB_OUTPUT ;;
+            ollama) echo "api_key_secret=NONE" >> $GITHUB_OUTPUT ;;
+          esac
+
       - name: Run Security Review
-        # We do NOT need a separate secret for GITHUB_TOKEN; it's provided by GitHub automatically.
-        # We DO need a secret for OPENAI_API_KEY, stored in your repo or org secrets.
         env:
-          GITHUB_TOKEN: ${{ github.token }} # The pipeline token
+          GITHUB_TOKEN: ${{ github.token }}
           GITHUB_REPOSITORY: ${{ github.repository }}
           PR_NUMBER: ${{ github.event.pull_request.number }}
-          SAIST_LLM_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
         run: |
-          python saist/main.py --llm deepseek github
+          echo "🔍 Running SAIST with ${{ steps.config.outputs.llm_provider }}"
+          echo "📁 Scan mode: ${{ steps.config.outputs.scan_mode }}"
+
+          # Get the API key for the selected provider
+          case "${{ steps.config.outputs.llm_provider }}" in
+            openai)
+              SAIST_LLM_API_KEY="${{ secrets.OPENAI_API_KEY }}"
+              ;;
+            anthropic)
+              SAIST_LLM_API_KEY="${{ secrets.ANTHROPIC_API_KEY }}"
+              ;;
+            deepseek)
+              SAIST_LLM_API_KEY="${{ secrets.DEEPSEEK_API_KEY }}"
+              ;;
+            gemini)
+              SAIST_LLM_API_KEY="${{ secrets.GEMINI_API_KEY }}"
+              ;;
+            ollama)
+              SAIST_LLM_API_KEY="" # Ollama doesn't need an API key
+              ;;
+          esac
+
+          # Build the command with API key checking
+          if [ "${{ steps.config.outputs.scan_mode }}" = "github" ]; then
+            # PR mode - scan the github PR with token
+            if [ -n "$SAIST_LLM_API_KEY" ]; then
+              python saist/main.py --llm ${{ steps.config.outputs.llm_provider }} --llm-api-key "$SAIST_LLM_API_KEY" github ${{ github.repository }} ${{ github.event.pull_request.number }} --github-token "${{ github.token }}"
+            else
+              python saist/main.py --llm ${{ steps.config.outputs.llm_provider }} github ${{ github.repository }} ${{ github.event.pull_request.number }} --github-token "${{ github.token }}"
+            fi
+          else
+            # Manual mode - scan filesystem
+            if [ -n "$SAIST_LLM_API_KEY" ]; then
+              python saist/main.py --llm ${{ steps.config.outputs.llm_provider }} --llm-api-key "$SAIST_LLM_API_KEY" --csv filesystem ${{ steps.config.outputs.scan_target }}
+            else
+              python saist/main.py --llm ${{ steps.config.outputs.llm_provider }} --csv filesystem ${{ steps.config.outputs.scan_target }}
+            fi
+          fi
+
+          echo "✅ SAIST scan completed"
+
+      - name: Upload Results (Manual runs only)
+        if: github.event_name == 'workflow_dispatch' && always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: saist-results-${{ github.run_number }}
+          path: |
+            findings.csv
+            *.pdf
+          retention-days: 30
diff --git a/saist/main.py b/saist/main.py
index 93a7f23..479e516 100644
--- a/saist/main.py
+++ b/saist/main.py
@@ -119,7 +119,7 @@ async def _get_llm_adapter(args) -> BaseLlmAdapter:
     if args.llm == 'anthropic':
         llm = AnthropicAdapter( api_key = args.llm_api_key, model=model)
         logger.debug(f"Using LLM: anthropic Model: {llm.model_name}")
-    if args.llm == 'bedrock':
+    elif args.llm == 'bedrock':
         llm = BedrockAdapter( api_key = args.llm_api_key, model=model)
         logger.debug(f"Using LLM: AWS bedrock Model: {llm.model_name}")
     elif args.llm == 'deepseek':