# SonarQube quality gate configuration for OWASP compliance
# This blocks merges when OWASP-category vulnerabilities are found
# Conditions evaluate on "new code" metrics so existing debt does not block merges.
quality_gate:
  conditions:
    # All new security hotspots must be reviewed (100%).
    - metric: new_security_hotspots_reviewed
      operator: LESS_THAN
      value: "100"
    - metric: new_vulnerabilities
      operator: GREATER_THAN
      value: "0"  # Blocks merge on any new vulnerability
    - metric: new_security_rating
      operator: GREATER_THAN
      value: "1"  # Requires A rating (no vulnerabilities)
# SonarQube quality gate configuration for OWASP compliance
# This blocks merges when OWASP-category vulnerabilities are found
# Conditions evaluate on "new code" metrics so existing debt does not block merges.
quality_gate:
  conditions:
    # All new security hotspots must be reviewed (100%).
    - metric: new_security_hotspots_reviewed
      operator: LESS_THAN
      value: "100"
    - metric: new_vulnerabilities
      operator: GREATER_THAN
      value: "0"  # Blocks merge on any new vulnerability
    - metric: new_security_rating
      operator: GREATER_THAN
      value: "1"  # Requires A rating (no vulnerabilities)
# SonarQube quality gate configuration for OWASP compliance
# This blocks merges when OWASP-category vulnerabilities are found
# Conditions evaluate on "new code" metrics so existing debt does not block merges.
quality_gate:
  conditions:
    # All new security hotspots must be reviewed (100%).
    - metric: new_security_hotspots_reviewed
      operator: LESS_THAN
      value: "100"
    - metric: new_vulnerabilities
      operator: GREATER_THAN
      value: "0"  # Blocks merge on any new vulnerability
    - metric: new_security_rating
      operator: GREATER_THAN
      value: "1"  # Requires A rating (no vulnerabilities)
# CodeRabbit enterprise security policy example (.coderabbit.yaml)
# Natural-language review instructions applied to every AI-reviewed PR.
reviews:
  instructions:
    - "Flag any API endpoint that accepts user input without input validation"
    - "Require authentication middleware on all routes under /api/"
    - "Flag any database query constructed with string concatenation"
    - "Warn when error responses include stack traces or internal details"
    - "Flag any use of eval(), exec(), or similar dynamic execution functions"
    - "Require rate limiting on all public-facing endpoints"
    - "Flag any hardcoded credentials, API keys, or secrets"
    - "Warn when logging statements include sensitive data fields"
# CodeRabbit enterprise security policy example (.coderabbit.yaml)
# Natural-language review instructions applied to every AI-reviewed PR.
reviews:
  instructions:
    - "Flag any API endpoint that accepts user input without input validation"
    - "Require authentication middleware on all routes under /api/"
    - "Flag any database query constructed with string concatenation"
    - "Warn when error responses include stack traces or internal details"
    - "Flag any use of eval(), exec(), or similar dynamic execution functions"
    - "Require rate limiting on all public-facing endpoints"
    - "Flag any hardcoded credentials, API keys, or secrets"
    - "Warn when logging statements include sensitive data fields"
# CodeRabbit enterprise security policy example (.coderabbit.yaml)
# Natural-language review instructions applied to every AI-reviewed PR.
reviews:
  instructions:
    - "Flag any API endpoint that accepts user input without input validation"
    - "Require authentication middleware on all routes under /api/"
    - "Flag any database query constructed with string concatenation"
    - "Warn when error responses include stack traces or internal details"
    - "Flag any use of eval(), exec(), or similar dynamic execution functions"
    - "Require rate limiting on all public-facing endpoints"
    - "Flag any hardcoded credentials, API keys, or secrets"
    - "Warn when logging statements include sensitive data fields"
# Semgrep enterprise policy configuration
# Applied across all repositories via CI/CD template
# Flags Express-style endpoints registered without authMiddleware.
rules:
  - id: enterprise-auth-required
    patterns:
      # Match any route handler...
      - pattern: |
          app.$METHOD($PATH, async (req, res) => { ... })
      # ...except those that already pass authMiddleware.
      - pattern-not: |
          app.$METHOD($PATH, authMiddleware, async (req, res) => { ... })
    message: "All API endpoints must use authMiddleware"
    severity: ERROR
    metadata:
      owasp: "A01:2021 Broken Access Control"
      cwe: "CWE-862: Missing Authorization"
      compliance: ["SOC2-CC6.1", "HIPAA-164.312(d)"]
# Semgrep enterprise policy configuration
# Applied across all repositories via CI/CD template
# Flags Express-style endpoints registered without authMiddleware.
rules:
  - id: enterprise-auth-required
    patterns:
      # Match any route handler...
      - pattern: |
          app.$METHOD($PATH, async (req, res) => { ... })
      # ...except those that already pass authMiddleware.
      - pattern-not: |
          app.$METHOD($PATH, authMiddleware, async (req, res) => { ... })
    message: "All API endpoints must use authMiddleware"
    severity: ERROR
    metadata:
      owasp: "A01:2021 Broken Access Control"
      cwe: "CWE-862: Missing Authorization"
      compliance: ["SOC2-CC6.1", "HIPAA-164.312(d)"]
# Semgrep enterprise policy configuration
# Applied across all repositories via CI/CD template
# Flags Express-style endpoints registered without authMiddleware.
rules:
  - id: enterprise-auth-required
    patterns:
      # Match any route handler...
      - pattern: |
          app.$METHOD($PATH, async (req, res) => { ... })
      # ...except those that already pass authMiddleware.
      - pattern-not: |
          app.$METHOD($PATH, authMiddleware, async (req, res) => { ... })
    message: "All API endpoints must use authMiddleware"
    severity: ERROR
    metadata:
      owasp: "A01:2021 Broken Access Control"
      cwe: "CWE-862: Missing Authorization"
      compliance: ["SOC2-CC6.1", "HIPAA-164.312(d)"]
# Example: Shared GitHub Actions workflow for organization-wide Semgrep scanning
# .github/workflows/semgrep.yml in a shared workflow repository
name: Semgrep Enterprise Scan
# workflow_call makes this reusable from other repositories;
# callers must supply SEMGREP_APP_TOKEN.
# NOTE: `jobs` is a top-level key in GitHub Actions — it must not be
# nested under `on` as the collapsed original implied.
on:
  workflow_call:
    secrets:
      SEMGREP_APP_TOKEN:
        required: true

jobs:
  scan:
    runs-on: ubuntu-latest
    container:
      image: semgrep/semgrep:latest
    steps:
      - uses: actions/checkout@v4
      - run: semgrep ci
        env:
          SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }}
          # Folded scalar: rule packs joined into one space-separated line.
          SEMGREP_RULES: >-
            p/security-audit
            p/owasp-top-ten
            p/secrets
            org-specific/custom-rules
# Example: Shared GitHub Actions workflow for organization-wide Semgrep scanning
# .github/workflows/semgrep.yml in a shared workflow repository
name: Semgrep Enterprise Scan
# workflow_call makes this reusable from other repositories;
# callers must supply SEMGREP_APP_TOKEN.
# NOTE: `jobs` is a top-level key in GitHub Actions — it must not be
# nested under `on` as the collapsed original implied.
on:
  workflow_call:
    secrets:
      SEMGREP_APP_TOKEN:
        required: true

jobs:
  scan:
    runs-on: ubuntu-latest
    container:
      image: semgrep/semgrep:latest
    steps:
      - uses: actions/checkout@v4
      - run: semgrep ci
        env:
          SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }}
          # Folded scalar: rule packs joined into one space-separated line.
          SEMGREP_RULES: >-
            p/security-audit
            p/owasp-top-ten
            p/secrets
            org-specific/custom-rules
# Example: Shared GitHub Actions workflow for organization-wide Semgrep scanning
# .github/workflows/semgrep.yml in a shared workflow repository
name: Semgrep Enterprise Scan
# workflow_call makes this reusable from other repositories;
# callers must supply SEMGREP_APP_TOKEN.
# NOTE: `jobs` is a top-level key in GitHub Actions — it must not be
# nested under `on` as the collapsed original implied.
on:
  workflow_call:
    secrets:
      SEMGREP_APP_TOKEN:
        required: true

jobs:
  scan:
    runs-on: ubuntu-latest
    container:
      image: semgrep/semgrep:latest
    steps:
      - uses: actions/checkout@v4
      - run: semgrep ci
        env:
          SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }}
          # Folded scalar: rule packs joined into one space-separated line.
          SEMGREP_RULES: >-
            p/security-audit
            p/owasp-top-ten
            p/secrets
            org-specific/custom-rules
# In each repository: .github/workflows/security.yml
name: Security
on: [pull_request]
# Delegates to the shared org-level workflow; `secrets: inherit`
# forwards this repository's secrets (including SEMGREP_APP_TOKEN).
jobs:
  semgrep:
    uses: my-org/.github/.github/workflows/semgrep.yml@main
    secrets: inherit
# In each repository: .github/workflows/security.yml
name: Security
on: [pull_request]
# Delegates to the shared org-level workflow; `secrets: inherit`
# forwards this repository's secrets (including SEMGREP_APP_TOKEN).
jobs:
  semgrep:
    uses: my-org/.github/.github/workflows/semgrep.yml@main
    secrets: inherit
# In each repository: .github/workflows/security.yml
name: Security
on: [pull_request]
# Delegates to the shared org-level workflow; `secrets: inherit`
# forwards this repository's secrets (including SEMGREP_APP_TOKEN).
jobs:
  semgrep:
    uses: my-org/.github/.github/workflows/semgrep.yml@main
    secrets: inherit
Organization policy (mandatory) | +-- Backend team policy (additional) | | | +-- payments-service (fine-tuning) | +-- user-service (fine-tuning) | +-- Frontend team policy (additional) | | | +-- web-app (fine-tuning) | +-- mobile-app (fine-tuning) | +-- Infrastructure team policy (additional) | +-- terraform-modules (fine-tuning)
Organization policy (mandatory) | +-- Backend team policy (additional) | | | +-- payments-service (fine-tuning) | +-- user-service (fine-tuning) | +-- Frontend team policy (additional) | | | +-- web-app (fine-tuning) | +-- mobile-app (fine-tuning) | +-- Infrastructure team policy (additional) | +-- terraform-modules (fine-tuning)
Organization policy (mandatory) | +-- Backend team policy (additional) | | | +-- payments-service (fine-tuning) | +-- user-service (fine-tuning) | +-- Frontend team policy (additional) | | | +-- web-app (fine-tuning) | +-- mobile-app (fine-tuning) | +-- Infrastructure team policy (additional) | +-- terraform-modules (fine-tuning)
Review time per PR: 30 minutes average
Total review time per month: 1,000 PRs x 30 min = 500 hours
Average developer cost: $80/hour (fully loaded)
Monthly cost of review: 500 x $80 = $40,000
Annual cost of review: $480,000
Review time per PR: 30 minutes average
Total review time per month: 1,000 PRs x 30 min = 500 hours
Average developer cost: $80/hour (fully loaded)
Monthly cost of review: 500 x $80 = $40,000
Annual cost of review: $480,000
Review time per PR: 30 minutes average
Total review time per month: 1,000 PRs x 30 min = 500 hours
Average developer cost: $80/hour (fully loaded)
Monthly cost of review: 500 x $80 = $40,000
Annual cost of review: $480,000
Defects introduced per month: 50 (estimate)
Defects caught in review (60%): 30
Defects reaching production (40%): 20
Average cost to fix in production: $5,000
Monthly cost of missed defects: 20 x $5,000 = $100,000
Annual cost of missed defects: $1,200,000
Defects introduced per month: 50 (estimate)
Defects caught in review (60%): 30
Defects reaching production (40%): 20
Average cost to fix in production: $5,000
Monthly cost of missed defects: 20 x $5,000 = $100,000
Annual cost of missed defects: $1,200,000
Defects introduced per month: 50 (estimate)
Defects caught in review (60%): 30
Defects reaching production (40%): 20
Average cost to fix in production: $5,000
Monthly cost of missed defects: 20 x $5,000 = $100,000
Annual cost of missed defects: $1,200,000
Average review wait time: 8 hours
PRs per month: 1,000
Developer hours blocked: 8,000 hours/month
Productivity loss (20% estimate): 1,600 productive hours lost
Cost of lost productivity: 1,600 x $80 = $128,000/month
Annual cost of review bottleneck: $1,536,000
Average review wait time: 8 hours
PRs per month: 1,000
Developer hours blocked: 8,000 hours/month
Productivity loss (20% estimate): 1,600 productive hours lost
Cost of lost productivity: 1,600 x $80 = $128,000/month
Annual cost of review bottleneck: $1,536,000
Average review wait time: 8 hours
PRs per month: 1,000
Developer hours blocked: 8,000 hours/month
Productivity loss (20% estimate): 1,600 productive hours lost
Cost of lost productivity: 1,600 x $80 = $128,000/month
Annual cost of review bottleneck: $1,536,000
Review labor: $480,000
Missed defects: $1,200,000
Review bottleneck: $1,536,000
Total: $3,216,000
Review labor: $480,000
Missed defects: $1,200,000
Review bottleneck: $1,536,000
Total: $3,216,000
Review labor: $480,000
Missed defects: $1,200,000
Review bottleneck: $1,536,000
Total: $3,216,000
Review time reduction: 40%
Annual review labor saved: $480,000 x 0.40 = $192,000
Review time reduction: 40%
Annual review labor saved: $480,000 x 0.40 = $192,000
Review time reduction: 40%
Annual review labor saved: $480,000 x 0.40 = $192,000
Previous detection rate: 60% (30 of 50 defects)
Improved detection rate: 80% (40 of 50 defects)
Additional defects caught: 10 per month
Production fix cost avoided: 10 x $5,000 = $50,000/month
Annual savings from detection: $600,000
Previous detection rate: 60% (30 of 50 defects)
Improved detection rate: 80% (40 of 50 defects)
Additional defects caught: 10 per month
Production fix cost avoided: 10 x $5,000 = $50,000/month
Annual savings from detection: $600,000
Previous detection rate: 60% (30 of 50 defects)
Improved detection rate: 80% (40 of 50 defects)
Additional defects caught: 10 per month
Production fix cost avoided: 10 x $5,000 = $50,000/month
Annual savings from detection: $600,000
Average wait time with AI: 2 hours (down from 8)
Productivity recovery: 75%
Annual bottleneck savings: $1,536,000 x 0.75 = $1,152,000
Average wait time with AI: 2 hours (down from 8)
Productivity recovery: 75%
Annual bottleneck savings: $1,536,000 x 0.75 = $1,152,000
Average wait time with AI: 2 hours (down from 8)
Productivity recovery: 75%
Annual bottleneck savings: $1,536,000 x 0.75 = $1,152,000
Annual savings: Review labor reduction: $192,000 Improved defect detection: $600,000 Reduced review bottleneck: $1,152,000 Total annual savings: $1,944,000 Annual tooling costs (100 developers): CodeRabbit Enterprise: $28,800 ($24/user/month) SonarQube Enterprise: $50,000 (LOC-based) Semgrep Team: $48,000 ($40/contributor/month) Integration and maintenance: $50,000 (platform team time) Total annual cost: $176,800 Net annual benefit: $1,767,200
ROI: 999%
Payback period: ~1.1 months
Annual savings: Review labor reduction: $192,000 Improved defect detection: $600,000 Reduced review bottleneck: $1,152,000 Total annual savings: $1,944,000 Annual tooling costs (100 developers): CodeRabbit Enterprise: $28,800 ($24/user/month) SonarQube Enterprise: $50,000 (LOC-based) Semgrep Team: $48,000 ($40/contributor/month) Integration and maintenance: $50,000 (platform team time) Total annual cost: $176,800 Net annual benefit: $1,767,200
ROI: 999%
Payback period: ~1.1 months
Annual savings: Review labor reduction: $192,000 Improved defect detection: $600,000 Reduced review bottleneck: $1,152,000 Total annual savings: $1,944,000 Annual tooling costs (100 developers): CodeRabbit Enterprise: $28,800 ($24/user/month) SonarQube Enterprise: $50,000 (LOC-based) Semgrep Team: $48,000 ($40/contributor/month) Integration and maintenance: $50,000 (platform team time) Total annual cost: $176,800 Net annual benefit: $1,767,200
ROI: 999%
Payback period: ~1.1 months
Enterprise Governance Dashboard - Key Metrics Security: Open Critical Vulnerabilities: 12 (down 45% from Q1) Mean Time to Remediate (Critical): 18 hours (SLA: 24 hours) OWASP Coverage: 9/10 categories covered Coverage: Repositories with AI Review: 187/203 (92%) PRs Reviewed by AI (last 30 days): 4,823/4,891 (98.6%) Quality Gate Pass Rate: 94.2% Efficiency: Average AI Review Time: 3.2 minutes Average Human Review Time: 18 minutes (down from 32) Developer Satisfaction (survey): 4.1/5.0
Enterprise Governance Dashboard - Key Metrics Security: Open Critical Vulnerabilities: 12 (down 45% from Q1) Mean Time to Remediate (Critical): 18 hours (SLA: 24 hours) OWASP Coverage: 9/10 categories covered Coverage: Repositories with AI Review: 187/203 (92%) PRs Reviewed by AI (last 30 days): 4,823/4,891 (98.6%) Quality Gate Pass Rate: 94.2% Efficiency: Average AI Review Time: 3.2 minutes Average Human Review Time: 18 minutes (down from 32) Developer Satisfaction (survey): 4.1/5.0
Enterprise Governance Dashboard - Key Metrics Security: Open Critical Vulnerabilities: 12 (down 45% from Q1) Mean Time to Remediate (Critical): 18 hours (SLA: 24 hours) OWASP Coverage: 9/10 categories covered Coverage: Repositories with AI Review: 187/203 (92%) PRs Reviewed by AI (last 30 days): 4,823/4,891 (98.6%) Quality Gate Pass Rate: 94.2% Efficiency: Average AI Review Time: 3.2 minutes Average Human Review Time: 18 minutes (down from 32) Developer Satisfaction (survey): 4.1/5.0 - Confidentiality - Is source code encrypted in transit and at rest? Who has access to customer code within the vendor's organization? What data retention policies exist?
- Security - How does the vendor protect against unauthorized access to the review infrastructure? What happens if the vendor's systems are breached?
- Processing integrity - Does the tool process code accurately and completely? Are there controls to prevent code from being altered during analysis?
- Availability - What uptime guarantees exist? What happens to your CI/CD pipeline if the review tool is unavailable? - Business Associate Agreement (BAA) - The tool vendor must sign a BAA acknowledging their obligations under HIPAA when processing code that may contain ePHI references.
- Encryption standards - Code must be encrypted with AES-256 or equivalent at rest and TLS 1.2+ in transit.
- Access controls - The tool must support role-based access, audit logging of all access events, and automatic session termination.
- Data retention controls - You must be able to configure how long the vendor retains code data, with the ability to request deletion. - GitHub Advanced Security (including CodeQL) operates within GitHub's FedRAMP-authorized Government Cloud environment
- Checkmarx offers FedRAMP-authorized deployment options
- Veracode has a FedRAMP-authorized platform - Where are code analysis servers located? Some tools process code in US-only data centers, which violates EU data residency requirements.
- Can you select a processing region? Enterprise-tier tools typically offer region selection. Checkmarx offers EU and US processing regions. SonarQube Cloud offers EU hosting.
- Does the AI component send data to a different region? Some tools use AI models hosted by third parties (OpenAI, Anthropic) whose servers may be in a different region than the tool's primary infrastructure. Verify the complete data flow.
- Do self-hosted options eliminate residency concerns? Yes - fully self-hosted deployments by definition keep data within your chosen infrastructure. This is why many European enterprises prefer SonarQube self-hosted, Semgrep OSS, or on-premises Checkmarx deployments. - Strongest AI capabilities - cloud tools leverage the latest and largest LLMs without you managing GPU infrastructure
- Zero operational overhead - no servers to maintain, no updates to apply, no scaling to manage
- Fastest time to value - most teams are running within an hour
- Continuous improvement - the vendor updates models, rules, and features without requiring action on your part - Code leaves your infrastructure - diffs and sometimes full file context are transmitted to the vendor's servers
- Dependency on vendor availability - if the vendor has an outage, your CI/CD pipeline may be affected
- Limited control over data handling - you rely on the vendor's policies rather than your own controls
- Potential compliance gaps - not all cloud tools meet every compliance framework - CodeRabbit - AI-first PR review with SOC 2 compliance, zero-retention policy for code, and enterprise SSO support
- Snyk Code - Cloud-native SAST with deep dataflow analysis and SOC 2/ISO 27001 compliance
- DeepSource - Low false-positive code analysis with SOC 2 compliance and configurable data retention
- Codacy - Code quality and security platform with SOC 2 compliance and GDPR readiness - Code never leaves your network - eliminates all data transmission concerns
- Full control over infrastructure - choose your own cloud region, configure network policies, manage access controls
- Compliance by design - no need for BAAs, data processing agreements, or vendor trust when code stays internal
- Air-gap capable - some tools can run with zero internet connectivity - Operational overhead - you are responsible for server provisioning, updates, scaling, backup, and monitoring
- Limited AI capabilities - self-hosted tools typically cannot leverage cloud-hosted LLMs, reducing the depth of AI analysis
- Slower feature updates - you must apply updates yourself, and may fall behind the cloud version
- Higher total cost of ownership - infrastructure costs, engineering time for maintenance, and opportunity cost - SonarQube - Available in Community (free), Developer, Enterprise, and Data Center editions, all fully self-hosted
- Semgrep - OSS CLI runs anywhere with no network requirements; Semgrep AppSec Platform can be self-managed
- Checkmarx - Full on-premises deployment option with the complete enterprise feature set
- Veracode - Offers on-premises scanning agents that analyze code locally and send only metadata to the cloud - SonarQube Enterprise - Runs fully offline with no external dependencies. All rules, analysis engines, and dashboards work without internet access. Updates are applied via offline packages.
- Semgrep OSS - The CLI runs locally with no network calls. Rules can be downloaded once and bundled with the installation. Custom rules work entirely offline.
- Checkmarx on-premises - Designed for air-gapped deployment with offline rule updates and local-only scanning.
- Coverity - Deep static analysis that runs entirely on local infrastructure, designed for defense and aerospace organizations. - All repositories: Self-hosted SonarQube or Semgrep for deterministic SAST, quality gates, and compliance scanning. Code never leaves the enterprise network.
- General repositories (80% of codebase): Cloud-hosted CodeRabbit or Snyk Code for AI-powered PR review on codebases that do not contain highly sensitive data.
- Sensitive repositories (20% of codebase): No cloud AI tools. Enhanced manual review with security champions. Self-hosted scanning only. - OWASP Top 10 - The most widely recognized web application security standard. SonarQube, Checkmarx, Veracode, and Semgrep all provide OWASP Top 10 coverage reports showing which categories are covered and which findings map to each category.
- CWE (Common Weakness Enumeration) - A more granular taxonomy with 900+ entries. Enterprise SAST tools tag each finding with its CWE ID, enabling precise tracking and reporting. Checkmarx maps findings to CWE IDs across 400+ weakness types. Veracode provides similar depth.
- SANS Top 25 - A prioritized subset of CWE focused on the most dangerous software weaknesses. SonarQube Enterprise and Semgrep include SANS Top 25 reporting. - Vulnerability trend reports - Show auditors that your vulnerability count is decreasing over time, demonstrating that your security program is effective.
- Coverage reports - Prove that every code change was scanned for specific vulnerability categories (OWASP Top 10, CWE Top 25).
- Remediation SLA reports - Demonstrate that vulnerabilities are being fixed within agreed timeframes.
- Policy enforcement evidence - Show that quality gates blocked non-compliant code from being merged. - SonarQube uses quality profiles and quality gates that can be set as defaults for the entire organization. A single quality profile change propagates to all projects automatically. Individual projects can override the default only with explicit administrator approval.
- CodeRabbit supports organization-level .coderabbit.yaml files that apply to all repositories. Individual repositories can extend or override the organization configuration, providing flexibility while maintaining a baseline.
- Semgrep allows deploying custom rule packs as CI/CD templates. A shared GitHub Actions workflow or GitLab CI template includes the organization's Semgrep configuration, ensuring every repository runs the same scans. Updates to the template propagate automatically.
- Checkmarx provides centralized policy management through its management console, where security teams define scanning presets that apply across all projects. - Organization layer - Mandatory policies that apply to all repositories. Security scanning, secret detection, and compliance-critical rules. These cannot be overridden.
- Team or department layer - Additional rules relevant to the team's technology stack and domain. A payments team adds PCI-relevant rules. A healthcare team adds HIPAA-relevant rules.
- Repository layer - Fine-tuning for the specific codebase. Suppressing false positives, adjusting severity levels, adding project-specific patterns. - PR-to-ticket linking. CodeRabbit reads Jira ticket references from PR titles, descriptions, and branch names (e.g., feature/PROJ-1234-add-auth) and includes the ticket context in its review. This helps the AI understand the intent behind the change.
- Finding-to-ticket creation. SonarQube and Checkmarx can automatically create Jira tickets for security findings that exceed a severity threshold. Each ticket includes the vulnerability details, affected file, remediation guidance, and a link back to the tool's dashboard.
- Sprint-level vulnerability tracking. Teams use Jira dashboards to track vulnerability remediation alongside feature work. AI code review findings flow into the same backlog, ensuring security work is visible and prioritized alongside business deliverables. - Change management records. In organizations that require change management approval for production deployments, AI code review results can be attached to ServiceNow change requests as evidence that code was reviewed and scanned for security issues.
- Security incident creation. Critical security findings from tools like Checkmarx or Veracode can automatically create ServiceNow security incidents, ensuring they are triaged by the security operations team.
- CMDB integration. Mapping code repositories to ServiceNow configuration items (CIs) enables correlation between code changes and service health. When a production incident occurs, teams can trace it back to recent code changes and their review results. - Critical vulnerability alerting. When AI code review detects a critical vulnerability - for example, a SQL injection in a production-facing endpoint - an alert can be sent to PagerDuty to page the on-call security engineer.
- Build failure escalation. When security quality gates block a high-priority deployment, PagerDuty can alert the appropriate team to remediate the finding quickly. - SAML/OIDC SSO - All enterprise-tier tools (CodeRabbit, SonarQube, Checkmarx, Veracode, Snyk Code, Codacy) support SAML 2.0 or OIDC-based SSO with providers like Okta, Azure AD, and PingFederate.
- SCIM provisioning - Automatic user provisioning and deprovisioning based on IdP group membership. This ensures that when a developer leaves the organization, their access to code review tools is revoked automatically.
- RBAC - Role-based access control that maps IdP groups to tool permissions. Security teams get admin access to policy configuration. Developers get standard access to view findings and manage suppressions. Auditors get read-only access to dashboards and reports. - Who reviewed the code? Both human reviewers and AI tools should be tracked. The audit trail should show which AI tool reviewed the PR, what findings it generated, and which human approved the merge.
- What was the scope of review? Which files were analyzed? Which rules or policies were applied? Were any files excluded from scanning?
- When was the review performed? Timestamps for AI analysis initiation, completion, human review actions, and merge approval.
- What findings were generated? All findings, including those that were suppressed or marked as false positives. The suppression reason and approver should be recorded.
- What action was taken? For each finding, was it remediated, suppressed, or accepted as a known risk? Who made the decision? - Security posture over time - Trend lines showing total open vulnerabilities, time-to-remediation, and new vulnerability introduction rate. A downward trend demonstrates program effectiveness.
- Coverage metrics - Percentage of repositories with AI code review enabled, percentage of PRs that received AI review, percentage of findings remediated within SLA.
- Quality gate compliance - Percentage of merges that passed quality gates versus those that required manual override. A high override rate indicates gates that are either too strict or not well-understood.
- Tool effectiveness - False positive rate, finding-to-fix rate, and developer satisfaction with AI review feedback. These metrics inform ongoing tool tuning and selection decisions. - Suppression requires a reason. All tools should be configured to require a comment or category when suppressing a finding (false positive, won't fix, acceptable risk).
- Suppression review. Findings marked as "won't fix" or "acceptable risk" should be reviewed by a security champion or security team member. Configure a monthly review cadence.
- Suppression expiration. Some enterprise teams set suppression expiration dates - a suppressed finding resurfaces after 90 days for re-evaluation. SonarQube supports this through its "accepted issues" workflow.
- Suppression audit. Track suppression rates by team and by developer. A consistently high suppression rate on a specific team may indicate that the tool needs tuning for their technology stack, or that the team needs additional security training. - Annual contract tools (Checkmarx, Veracode) typically include volume discounts at 200+ seats.
- SonarQube pricing varies dramatically by edition and lines of code. Community Build is free. Data Center Edition for a 500K LOC codebase with high availability can exceed $150,000/yr.
- Most per-seat tools offer enterprise tier pricing that is lower per seat than the listed price at 200+ users. Always negotiate.
- Integration costs (platform team time, CI/CD configuration, training) typically add 15 to 25 percent on top of license costs in year one. - SonarQube Developer or Enterprise for deterministic SAST and code quality. Self-hosted. $20,000 to $80,000/yr depending on edition and LOC.
- CodeRabbit for AI-powered PR review across all repositories. $57,600/yr at enterprise tier. - Semgrep Team for advanced SAST with custom rules and supply chain scanning. $96,000/yr for 200 contributors, but can be scoped to specific repositories.
- Snyk Code for cross-file dataflow analysis on the most security-sensitive services. $60,000 to $120,000/yr depending on scope. - Checkmarx or Veracode for compliance reporting and deep scanning of regulated codebases. $80,000 to $200,000/yr. - Does the tool have SOC 2 Type II certification? Request the report.
- Does the tool support your specific compliance requirements (HIPAA, FedRAMP, PCI DSS, GDPR)?
- What is the tool's data retention policy? Can it be configured?
- Does the vendor sign BAAs for healthcare use cases?
- Where is code processed geographically? Can you select a region?
- Does the tool train on customer code? Get this in writing, not just from the marketing page. - Is self-hosted deployment available? What are the infrastructure requirements?
- Can the tool operate in an air-gapped environment?
- What is the operational overhead of self-hosted deployment?
- Does the cloud version offer the same features as self-hosted, or are some capabilities cloud-only? - Does the tool integrate with your source code platform (GitHub, GitLab, Bitbucket, Azure DevOps)?
- Does it integrate with your CI/CD system (GitHub Actions, GitLab CI, Jenkins, CircleCI)?
- Does it integrate with your project management tool (Jira, Azure Boards)?
- Does it support SSO via your identity provider (Okta, Azure AD, PingFederate)?
- Does it support SCIM for automated user provisioning?
- Is there an API for building custom integrations and dashboards? - What are the concurrency limits for simultaneous analyses?
- What is the analysis time at your codebase size?
- Does the tool support organizational-level configuration?
- What is the maximum number of repositories supported?
- What is the vendor's uptime SLA? - How long does initial setup take per repository?
- Is the feedback delivered inline on PRs or only in a separate dashboard?
- What is the false positive rate for your technology stack?
- Can developers configure review preferences for their repositories?
- How do developers suppress false positives, and what governance exists around suppressions? - Checkmarx Enterprise for comprehensive SAST with PCI DSS reporting
- SonarQube Enterprise (self-hosted) for code quality gates and OWASP dashboards
- CodeRabbit Enterprise for AI PR review on non-PCI codebases
- Semgrep Team for custom rules specific to their financial frameworks - PCI-scoped repositories use only self-hosted tools - no cloud AI review
- All other repositories use the full tool stack including cloud AI review
- Jira integration for finding management with mandatory SLAs
- Quarterly deep scans with Checkmarx for compliance reporting
- Annual penetration testing to validate automated findings - SonarQube Enterprise (self-hosted) for all repositories - code never leaves the VPC
- Semgrep OSS for custom HIPAA-specific rules (PHI logging detection, encryption validation)
- Snyk Code with BAA for cloud-based dataflow analysis on non-PHI codebases
- CodeRabbit with custom HIPAA instructions for general code review - Repositories that process ePHI use self-hosted tools exclusively
- Custom Semgrep rules detect logging of PHI fields, unencrypted PHI storage, and PHI in error messages
- ServiceNow integration for HIPAA incident management
- Audit trails maintained in Splunk for 7-year retention - CodeRabbit Enterprise for AI PR review across all repositories
- SonarQube Data Center Edition (self-hosted, HA) for quality gates and technical debt tracking
- Semgrep Team for SAST and supply chain scanning
- GitHub Copilot Enterprise for in-IDE AI assistance - Cloud tools used universally - no regulatory restrictions on data transmission
- SonarQube Data Center deployed with high availability for zero-downtime scanning
- Shared CI/CD templates enforce consistent scanning across all 500+ repositories
- Platform team of 3 engineers manages the review infrastructure
- Self-service onboarding: new repositories automatically inherit organization-level configuration
- Monthly security posture reports generated from aggregated tool data