From 18c22a1e59852954698db84a3eb007b587071d20 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Mon, 5 Jan 2026 15:07:19 +0200 Subject: [PATCH 01/18] Add parallel PR scanning functions (CompareJasResults, UnifyScaAndJasResults) - based on v1.24.2 --- utils/results/results.go | 296 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 296 insertions(+) diff --git a/utils/results/results.go b/utils/results/results.go index 4e6baedce..43ff13c4d 100644 --- a/utils/results/results.go +++ b/utils/results/results.go @@ -3,6 +3,8 @@ package results import ( "errors" "fmt" + "path/filepath" + "strings" "sync" "time" @@ -731,3 +733,297 @@ func (jsr *JasScansResults) HasInformationByType(scanType jasutils.JasScanType) } return false } + +// ============================================================================= +// Parallel PR Scanning Functions +// These functions support running SCA and JAS scans in parallel for PR scanning +// ============================================================================= + +// UnifyScaAndJasResults merges SCA results and JAS diff results into a single SecurityCommandResults +func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) *SecurityCommandResults { + // Create unified results based on JAS diff structure + unifiedResults := &SecurityCommandResults{ + ResultsMetaData: ResultsMetaData{ + EntitledForJas: jasDiffResults.EntitledForJas, + SecretValidation: jasDiffResults.SecretValidation, + CmdType: jasDiffResults.CmdType, + XrayVersion: jasDiffResults.XrayVersion, + XscVersion: jasDiffResults.XscVersion, + MultiScanId: jasDiffResults.MultiScanId, + StartTime: jasDiffResults.StartTime, + ResultContext: jasDiffResults.ResultContext, + }, + } + + // Merge targets from both SCA and JAS results + for _, scaTarget := range scaResults.Targets { + // Find corresponding JAS target + var jasTarget *TargetResults + for _, jTarget := range jasDiffResults.Targets { + if jTarget.Target == scaTarget.Target { + jasTarget = jTarget + break + } + } + + // Create unified target with both SCA and JAS results + unifiedTarget := &TargetResults{ + ScanTarget: scaTarget.ScanTarget, + AppsConfigModule: scaTarget.AppsConfigModule, + ScaResults: scaTarget.ScaResults, + JasResults: nil, + } + + // Add JAS diff results if available + if jasTarget != nil { + unifiedTarget.JasResults = jasTarget.JasResults + } + + unifiedResults.Targets = append(unifiedResults.Targets, unifiedTarget) + } + + return unifiedResults +} + +// CompareJasResults computes the diff between target and source JAS results +// Returns only the NEW findings in source that don't exist in target +func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *SecurityCommandResults { + log.Info("[DIFF] Starting JAS diff calculation") + log.Debug("[DIFF] Comparing", len(sourceResults.Targets), "source targets against", len(targetResults.Targets), "target targets") + + // Create diff results based on source structure + diffResults := &SecurityCommandResults{ + ResultsMetaData: ResultsMetaData{ + EntitledForJas: sourceResults.EntitledForJas, + SecretValidation: sourceResults.SecretValidation, + CmdType: sourceResults.CmdType, + XrayVersion: sourceResults.XrayVersion, + XscVersion: sourceResults.XscVersion, + MultiScanId: sourceResults.MultiScanId, + StartTime: sourceResults.StartTime, + ResultContext: sourceResults.ResultContext, + }, + } + + // Compare each source target against ALL target targets + for _, sourceTarget := range sourceResults.Targets { + if sourceTarget.JasResults == 
nil { + continue + } + + // Collect ALL target JAS results to compare against + var allTargetJasResults []*JasScansResults + for _, targetTarget := range targetResults.Targets { + if targetTarget.JasResults != nil { + allTargetJasResults = append(allTargetJasResults, targetTarget.JasResults) + } + } + + // Apply diff logic against all collected targets + diffJasResults := applyAmDiffLogicAgainstAll(allTargetJasResults, sourceTarget.JasResults) + + diffTarget := &TargetResults{ + ScanTarget: sourceTarget.ScanTarget, + JasResults: diffJasResults, + } + + diffResults.Targets = append(diffResults.Targets, diffTarget) + } + + log.Info("[DIFF] JAS diff calculation completed with", len(diffResults.Targets), "diff targets") + return diffResults +} + +// applyAmDiffLogicAgainstAll applies the Analyzer Manager's diff logic against all target results +func applyAmDiffLogicAgainstAll(allTargetJasResults []*JasScansResults, sourceJasResults *JasScansResults) *JasScansResults { + if sourceJasResults == nil { + return nil + } + + // If no target results, return all source results (everything is new) + if len(allTargetJasResults) == 0 { + return sourceJasResults + } + + // Build target fingerprint set from ALL target results + targetKeys := make(map[string]bool) + + for _, targetJasResults := range allTargetJasResults { + if targetJasResults == nil { + continue + } + + // Extract from target secrets results (location-based) + for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.Secrets) { + extractLocationsOnly(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.Secrets) { + extractLocationsOnly(targetRun, targetKeys) + } + // Extract from target IaC results (location-based) + for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.IaC) { + extractLocationsOnly(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.IaC) { + extractLocationsOnly(targetRun, targetKeys) + } + // Extract from target SAST results (fingerprint-based) + for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.Sast) { + extractFingerprints(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.Sast) { + extractFingerprints(targetRun, targetKeys) + } + } + + log.Debug("[DIFF-AM] Built target fingerprint set with", len(targetKeys), "unique keys") + + // Filter source results - keep only what's NOT in target + filteredJasResults := &JasScansResults{} + + // Filter vulnerabilities + filteredJasResults.JasVulnerabilities.SecretsScanResults = filterSarifRuns( + sourceJasResults.JasVulnerabilities.SecretsScanResults, targetKeys) + filteredJasResults.JasVulnerabilities.IacScanResults = filterSarifRuns( + sourceJasResults.JasVulnerabilities.IacScanResults, targetKeys) + filteredJasResults.JasVulnerabilities.SastScanResults = filterSarifRuns( + sourceJasResults.JasVulnerabilities.SastScanResults, targetKeys) + + // Filter violations + filteredJasResults.JasViolations.SecretsScanResults = filterSarifRuns( + sourceJasResults.JasViolations.SecretsScanResults, targetKeys) + filteredJasResults.JasViolations.IacScanResults = filterSarifRuns( + sourceJasResults.JasViolations.IacScanResults, targetKeys) + filteredJasResults.JasViolations.SastScanResults = filterSarifRuns( + sourceJasResults.JasViolations.SastScanResults, targetKeys) + + return filteredJasResults +} + +// extractFingerprints extracts fingerprints from SARIF run (for SAST) 
+func extractFingerprints(run *sarif.Run, targetKeys map[string]bool) { + for _, result := range run.Results { + if result.Fingerprints != nil { + key := getResultFingerprint(result) + if key != "" { + targetKeys[key] = true + } + } else { + for _, location := range result.Locations { + key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + targetKeys[key] = true + } + } + } +} + +// extractLocationsOnly extracts locations (for Secrets and IaC - no fingerprints) +func extractLocationsOnly(run *sarif.Run, targetKeys map[string]bool) { + for _, result := range run.Results { + for _, location := range result.Locations { + key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + targetKeys[key] = true + } + } +} + +// getResultFingerprint returns the SAST fingerprint from a result +func getResultFingerprint(result *sarif.Result) string { + if result.Fingerprints != nil { + if value, ok := result.Fingerprints["precise_sink_and_sink_function"]; ok { + return value + } + } + return "" +} + +// getLocationSnippetText returns the snippet text from a location +func getLocationSnippetText(location *sarif.Location) string { + if location.PhysicalLocation != nil && location.PhysicalLocation.Region != nil && + location.PhysicalLocation.Region.Snippet != nil && location.PhysicalLocation.Region.Snippet.Text != nil { + return *location.PhysicalLocation.Region.Snippet.Text + } + return "" +} + +// getRelativeLocationFileName returns the relative file path from a location +func getRelativeLocationFileName(location *sarif.Location, invocations []*sarif.Invocation) string { + wd := "" + if len(invocations) > 0 { + wd = getInvocationWorkingDirectory(invocations[0]) + } + filePath := getLocationFileName(location) + if filePath != "" { + return extractRelativePath(filePath, wd) + } + return "" +} + +func getInvocationWorkingDirectory(invocation *sarif.Invocation) string { + if invocation != nil && invocation.WorkingDirectory != nil && invocation.WorkingDirectory.URI != nil { + return *invocation.WorkingDirectory.URI + } + return "" +} + +func getLocationFileName(location *sarif.Location) string { + if location != nil && location.PhysicalLocation != nil && location.PhysicalLocation.ArtifactLocation != nil && location.PhysicalLocation.ArtifactLocation.URI != nil { + return *location.PhysicalLocation.ArtifactLocation.URI + } + return "" +} + +func extractRelativePath(resultPath string, projectRoot string) string { + // Remove OS-specific file prefix + resultPath = strings.TrimPrefix(resultPath, "file:///private") + resultPath = strings.TrimPrefix(resultPath, "file:///") + projectRoot = strings.TrimPrefix(projectRoot, "file:///private") + projectRoot = strings.TrimPrefix(projectRoot, "file:///") + projectRoot = strings.TrimPrefix(projectRoot, "/") + + // Get relative path (removes temp directory) + relativePath := strings.ReplaceAll(resultPath, projectRoot, "") + trimSlash := strings.TrimPrefix(relativePath, string(filepath.Separator)) + return strings.TrimPrefix(trimSlash, "/") +} + +// filterSarifRuns filters SARIF runs, keeping only results that are NOT in target +func filterSarifRuns(sourceRuns []*sarif.Run, targetKeys map[string]bool) []*sarif.Run { + var filteredRuns []*sarif.Run + + for _, run := range sourceRuns { + var filteredResults []*sarif.Result + + for _, result := range run.Results { + if result.Fingerprints != nil { + // Use fingerprint for matching (SAST) + if !targetKeys[getResultFingerprint(result)] { + 
filteredResults = append(filteredResults, result) + } + } else { + // Use location for matching (Secrets, IaC) + var filteredLocations []*sarif.Location + for _, location := range result.Locations { + key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + if !targetKeys[key] { + filteredLocations = append(filteredLocations, location) + } + } + + if len(filteredLocations) > 0 { + newResult := *result + newResult.Locations = filteredLocations + filteredResults = append(filteredResults, &newResult) + } + } + } + + if len(filteredResults) > 0 { + filteredRun := *run + filteredRun.Results = filteredResults + filteredRuns = append(filteredRuns, &filteredRun) + } + } + + return filteredRuns +} From 6c07738596703ac85d6cb56d8cc2d1a8e0b1e255 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Mon, 5 Jan 2026 17:52:01 +0200 Subject: [PATCH 02/18] Add RunBranchDiffAudit for sequential branch scanning with clean logs - Add branchdiff.go with RunBranchDiffAudit function - Scans target branch first, then source branch (sequential) - SCA diff handled internally by CLI - JAS diff handled by CompareJasResults - Add MergeStatusCodes for partial results filtering - Status codes merged (worst of target + source) for frogbot filtering --- commands/audit/branchdiff.go | 160 ++++++++++++++++++ .../go/curation-project/go.sum | 3 + utils/results/results.go | 29 ++++ 3 files changed, 192 insertions(+) create mode 100644 commands/audit/branchdiff.go create mode 100644 tests/testdata/projects/package-managers/go/curation-project/go.sum diff --git a/commands/audit/branchdiff.go b/commands/audit/branchdiff.go new file mode 100644 index 000000000..33e3dfee0 --- /dev/null +++ b/commands/audit/branchdiff.go @@ -0,0 +1,160 @@ +package audit + +import ( + "github.com/jfrog/jfrog-cli-security/utils/results" + "github.com/jfrog/jfrog-client-go/utils/log" +) + +// BranchDiffParams holds parameters for branch diff scanning +type BranchDiffParams struct { + // Base audit params (used for both target and source) + BaseParams *AuditParams + // Target branch working directory (the branch being merged into, e.g., main) + TargetWorkingDir string + // Source branch working directory (the branch with changes, e.g., feature branch) + SourceWorkingDir string +} + +// NewBranchDiffParams creates new branch diff parameters +func NewBranchDiffParams() *BranchDiffParams { + return &BranchDiffParams{ + BaseParams: NewAuditParams(), + } +} + +func (p *BranchDiffParams) SetBaseParams(params *AuditParams) *BranchDiffParams { + p.BaseParams = params + return p +} + +func (p *BranchDiffParams) SetTargetWorkingDir(dir string) *BranchDiffParams { + p.TargetWorkingDir = dir + return p +} + +func (p *BranchDiffParams) SetSourceWorkingDir(dir string) *BranchDiffParams { + p.SourceWorkingDir = dir + return p +} + +// RunBranchDiffAudit runs a diff scan between target and source branches. +// It scans target first, then source, computes the diff, and returns only NEW findings. +// Logs are sequential: all target logs first, then all source logs. 
+// +// Returns: +// - Unified diffed results containing only new vulnerabilities in source +// - Status codes are merged (worst of target + source) for partial results filtering +func RunBranchDiffAudit(params *BranchDiffParams) (diffResults *results.SecurityCommandResults) { + log.Info("========== BRANCH DIFF SCAN ==========") + log.Info("Target branch:", params.TargetWorkingDir) + log.Info("Source branch:", params.SourceWorkingDir) + log.Info("=======================================") + + // Phase 1: Scan TARGET branch + log.Info("") + log.Info("========== SCANNING TARGET BRANCH ==========") + targetParams := cloneAuditParams(params.BaseParams) + targetParams.SetWorkingDirs([]string{params.TargetWorkingDir}) + targetParams.SetDiffMode(true) + targetParams.SetResultsToCompare(nil) // No comparison for target - we're collecting baseline + targetParams.SetUploadCdxResults(false) // Don't upload intermediate results + + targetResults := RunAudit(targetParams) + + if targetResults.GeneralError != nil { + log.Error("Target branch scan failed:", targetResults.GeneralError) + return targetResults + } + log.Info("========== TARGET BRANCH SCAN COMPLETE ==========") + log.Info("") + + // Phase 2: Scan SOURCE branch + log.Info("========== SCANNING SOURCE BRANCH ==========") + sourceParams := cloneAuditParams(params.BaseParams) + sourceParams.SetWorkingDirs([]string{params.SourceWorkingDir}) + sourceParams.SetDiffMode(true) + sourceParams.SetResultsToCompare(targetResults) // SCA will use this for internal diff + sourceParams.SetUploadCdxResults(false) // Don't upload intermediate results + + sourceResults := RunAudit(sourceParams) + + if sourceResults.GeneralError != nil { + log.Error("Source branch scan failed:", sourceResults.GeneralError) + return sourceResults + } + log.Info("========== SOURCE BRANCH SCAN COMPLETE ==========") + log.Info("") + + // Phase 3: Compute JAS diff (SCA diff is already done internally) + log.Info("========== COMPUTING DIFF ==========") + jasDiffResults := results.CompareJasResults(targetResults, sourceResults) + + // Phase 4: Unify SCA (already diffed) + JAS (just diffed) results + diffResults = results.UnifyScaAndJasResults(sourceResults, jasDiffResults) + + // Phase 5: Merge status codes from both target and source + // This ensures partial results filtering works correctly - if ANY scan failed + // on either branch, the corresponding scanner results should be filtered + targetStatus := targetResults.GetStatusCodes() + sourceStatus := sourceResults.GetStatusCodes() + mergedStatus := results.MergeStatusCodes(targetStatus, sourceStatus) + + // Apply merged status to all targets in diff results + for _, target := range diffResults.Targets { + target.ResultsStatus = mergedStatus + } + + // Copy violation status code if set + if targetResults.ViolationsStatusCode != nil || sourceResults.ViolationsStatusCode != nil { + mergedViolationStatus := mergeViolationStatus(targetResults.ViolationsStatusCode, sourceResults.ViolationsStatusCode) + diffResults.ViolationsStatusCode = mergedViolationStatus + } + + log.Info("========== DIFF COMPLETE ==========") + log.Info("Diff results: ", len(diffResults.Targets), " targets with new findings") + + return diffResults +} + +// cloneAuditParams creates a shallow copy of AuditParams for independent configuration +func cloneAuditParams(params *AuditParams) *AuditParams { + cloned := NewAuditParams() + + // Copy basic params + cloned.AuditBasicParams = params.AuditBasicParams + + // Copy all other fields + 
cloned.SetResultsContext(params.resultsContext) + cloned.SetGitContext(params.gitContext) + cloned.SetBomGenerator(params.bomGenerator) + cloned.SetCustomBomGenBinaryPath(params.customBomGenBinaryPath) + cloned.SetCustomAnalyzerManagerBinaryPath(params.customAnalyzerManagerBinaryPath) + cloned.SetSastRules(params.sastRules) + cloned.SetScaScanStrategy(params.scaScanStrategy) + cloned.SetStartTime(params.startTime) + cloned.SetMultiScanId(params.multiScanId) + cloned.SetMinSeverityFilter(params.minSeverityFilter) + cloned.SetFixableOnly(params.fixableOnly) + cloned.SetThirdPartyApplicabilityScan(params.thirdPartyApplicabilityScan) + cloned.SetThreads(params.threads) + cloned.SetScansResultsOutputDir(params.scanResultsOutputDir) + cloned.SetViolationGenerator(params.violationGenerator) + cloned.SetAllowedLicenses(params.allowedLicenses) + cloned.SetRtResultRepository(params.rtResultRepository) + + return cloned +} + +// mergeViolationStatus returns the worst (non-zero) violation status +func mergeViolationStatus(a, b *int) *int { + if a == nil { + return b + } + if b == nil { + return a + } + if *a != 0 { + return a + } + return b +} diff --git a/tests/testdata/projects/package-managers/go/curation-project/go.sum b/tests/testdata/projects/package-managers/go/curation-project/go.sum new file mode 100644 index 000000000..c8bdc71cc --- /dev/null +++ b/tests/testdata/projects/package-managers/go/curation-project/go.sum @@ -0,0 +1,3 @@ +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/utils/results/results.go b/utils/results/results.go index 43ff13c4d..026ac4316 100644 --- a/utils/results/results.go +++ b/utils/results/results.go @@ -987,6 +987,35 @@ func extractRelativePath(resultPath string, projectRoot string) string { return strings.TrimPrefix(trimSlash, "/") } +// MergeStatusCodes merges two ResultsStatus structs, taking the worst (non-zero) status for each scanner +// This is used when combining target and source results to ensure partial results filtering works correctly +func MergeStatusCodes(target, source ResultsStatus) ResultsStatus { + merged := ResultsStatus{} + merged.SbomScanStatusCode = mergeStatusCode(target.SbomScanStatusCode, source.SbomScanStatusCode) + merged.ScaScanStatusCode = mergeStatusCode(target.ScaScanStatusCode, source.ScaScanStatusCode) + merged.ContextualAnalysisStatusCode = mergeStatusCode(target.ContextualAnalysisStatusCode, source.ContextualAnalysisStatusCode) + merged.SecretsScanStatusCode = mergeStatusCode(target.SecretsScanStatusCode, source.SecretsScanStatusCode) + merged.IacScanStatusCode = mergeStatusCode(target.IacScanStatusCode, source.IacScanStatusCode) + merged.SastScanStatusCode = mergeStatusCode(target.SastScanStatusCode, source.SastScanStatusCode) + merged.ViolationsStatusCode = mergeStatusCode(target.ViolationsStatusCode, source.ViolationsStatusCode) + return merged +} + +// mergeStatusCode returns the worst (non-zero) status code between two +func mergeStatusCode(a, b *int) *int { + if a == nil { + return b + } + if b == nil { + return a + } + // Return the non-zero value (failed status), or zero if both succeeded + if *a != 0 { + return a + } + return b +} + // filterSarifRuns filters SARIF runs, keeping only results that are NOT in target func filterSarifRuns(sourceRuns []*sarif.Run, targetKeys map[string]bool) []*sarif.Run 
{ var filteredRuns []*sarif.Run From caf59ebb0d6520f929cddf6f954af2a4f3cdc8cd Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Mon, 5 Jan 2026 18:20:41 +0200 Subject: [PATCH 03/18] Add Logger field to AuditBasicParams for parallel scan log separation - Add logger field to AuditBasicParams with getter/setter - RunAudit swaps global logger if custom logger provided - Enables frogbot to capture logs per-scan for ordered output --- commands/audit/audit.go | 7 +++++++ commands/audit/auditbasicparams.go | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/commands/audit/audit.go b/commands/audit/audit.go index 683c8cca3..3c14689e5 100644 --- a/commands/audit/audit.go +++ b/commands/audit/audit.go @@ -291,6 +291,13 @@ func (auditCmd *AuditCommand) CommandName() string { // Returns an audit Results object containing all the scan results. // If the current server is entitled for JAS, the advanced security results will be included in the scan results. func RunAudit(auditParams *AuditParams) (cmdResults *results.SecurityCommandResults) { + // If a custom logger is provided, swap it in for the duration of the scan. + // This enables log separation when running multiple scans in parallel. + if customLogger := auditParams.GetLogger(); customLogger != nil { + originalLogger := log.GetLogger() + log.SetLogger(customLogger) + defer log.SetLogger(originalLogger) + } // Prepare the command for the scan. if cmdResults = prepareToScan(auditParams); cmdResults.GeneralError != nil { return diff --git a/commands/audit/auditbasicparams.go b/commands/audit/auditbasicparams.go index 7ea2c0722..a1a981924 100644 --- a/commands/audit/auditbasicparams.go +++ b/commands/audit/auditbasicparams.go @@ -5,6 +5,7 @@ import ( "github.com/jfrog/jfrog-cli-core/v2/utils/config" "github.com/jfrog/jfrog-cli-security/utils" ioUtils "github.com/jfrog/jfrog-client-go/utils/io" + "github.com/jfrog/jfrog-client-go/utils/log" xscservices "github.com/jfrog/jfrog-client-go/xsc/services" ) @@ -82,6 +83,10 @@ type AuditBasicParams struct { xscVersion string configProfile *xscservices.ConfigProfile solutionFilePath string + // Logger allows callers to provide a custom logger for this audit. + // If set, CLI will use this logger instead of the global logger during the scan. + // This enables log separation for parallel scans. + logger log.Log } func (abp *AuditBasicParams) DirectDependencies() *[]string { @@ -342,3 +347,16 @@ func (abp *AuditBasicParams) SetSolutionFilePath(solutionFilePath string) *Audit abp.solutionFilePath = solutionFilePath return abp } + +// SetLogger sets a custom logger for this audit. +// If set, CLI will temporarily swap the global logger during the scan, +// enabling log separation for parallel scans. +func (abp *AuditBasicParams) SetLogger(logger log.Log) *AuditBasicParams { + abp.logger = logger + return abp +} + +// GetLogger returns the custom logger, or nil if not set. 
+func (abp *AuditBasicParams) GetLogger() log.Log { + return abp.logger +} From 7fc3bd09019cd2c861b137d581b08561dfeb407d Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Sun, 11 Jan 2026 20:54:49 +0200 Subject: [PATCH 04/18] Add LogCollector for isolated parallel audit logging - Add LogCollector that captures logs in isolated buffer per audit - Enables parallel audits without log interleaving - Uses goroutine-local logger from jfrog-client-go - Propagate logger to child goroutines in JAS runner and SCA scan - Remove unused diff completion log message --- commands/audit/audit.go | 21 +++++++---- commands/audit/auditbasicparams.go | 26 +++++++------- commands/audit/logcollector.go | 57 ++++++++++++++++++++++++++++++ jas/runner/jasrunner.go | 10 +++++- sca/scan/scascan.go | 10 +++++- utils/results/results.go | 33 ++++++++++++++++- 6 files changed, 133 insertions(+), 24 deletions(-) create mode 100644 commands/audit/logcollector.go diff --git a/commands/audit/audit.go b/commands/audit/audit.go index 3c14689e5..21aa0a98f 100644 --- a/commands/audit/audit.go +++ b/commands/audit/audit.go @@ -291,12 +291,11 @@ func (auditCmd *AuditCommand) CommandName() string { // Returns an audit Results object containing all the scan results. // If the current server is entitled for JAS, the advanced security results will be included in the scan results. func RunAudit(auditParams *AuditParams) (cmdResults *results.SecurityCommandResults) { - // If a custom logger is provided, swap it in for the duration of the scan. - // This enables log separation when running multiple scans in parallel. - if customLogger := auditParams.GetLogger(); customLogger != nil { - originalLogger := log.GetLogger() - log.SetLogger(customLogger) - defer log.SetLogger(originalLogger) + // Set up isolated logging for this audit if a log collector is provided. + // This enables completely isolated log capture for parallel audits. + if collector := auditParams.GetLogCollector(); collector != nil { + log.SetLoggerForGoroutine(collector.Logger()) + defer log.ClearLoggerForGoroutine() } // Prepare the command for the scan. 
if cmdResults = prepareToScan(auditParams); cmdResults.GeneralError != nil { @@ -629,7 +628,15 @@ func addJasScansToRunner(auditParallelRunner *utils.SecurityParallelRunner, audi return } auditParallelRunner.JasWg.Add(1) - if _, jasErr := auditParallelRunner.Runner.AddTaskWithError(createJasScansTask(auditParallelRunner, scanResults, serverDetails, auditParams, jasScanner), func(taskErr error) { + // Capture current logger to propagate to child goroutine + currentLogger := log.GetLogger() + jasTask := createJasScansTask(auditParallelRunner, scanResults, serverDetails, auditParams, jasScanner) + wrappedJasTask := func(threadId int) error { + log.SetLoggerForGoroutine(currentLogger) + defer log.ClearLoggerForGoroutine() + return jasTask(threadId) + } + if _, jasErr := auditParallelRunner.Runner.AddTaskWithError(wrappedJasTask, func(taskErr error) { scanResults.AddGeneralError(fmt.Errorf("failed while adding JAS scan tasks: %s", taskErr.Error()), auditParams.AllowPartialResults()) }); jasErr != nil { generalError = fmt.Errorf("failed to create JAS task: %s", jasErr.Error()) diff --git a/commands/audit/auditbasicparams.go b/commands/audit/auditbasicparams.go index a1a981924..a6e41f119 100644 --- a/commands/audit/auditbasicparams.go +++ b/commands/audit/auditbasicparams.go @@ -5,7 +5,6 @@ import ( "github.com/jfrog/jfrog-cli-core/v2/utils/config" "github.com/jfrog/jfrog-cli-security/utils" ioUtils "github.com/jfrog/jfrog-client-go/utils/io" - "github.com/jfrog/jfrog-client-go/utils/log" xscservices "github.com/jfrog/jfrog-client-go/xsc/services" ) @@ -82,11 +81,10 @@ type AuditBasicParams struct { xrayVersion string xscVersion string configProfile *xscservices.ConfigProfile - solutionFilePath string - // Logger allows callers to provide a custom logger for this audit. - // If set, CLI will use this logger instead of the global logger during the scan. - // This enables log separation for parallel scans. - logger log.Log + solutionFilePath string + // logCollector provides isolated log capture for this audit. + // When set, all logs from this audit are captured in the collector's buffer. + logCollector *LogCollector } func (abp *AuditBasicParams) DirectDependencies() *[]string { @@ -348,15 +346,15 @@ func (abp *AuditBasicParams) SetSolutionFilePath(solutionFilePath string) *Audit return abp } -// SetLogger sets a custom logger for this audit. -// If set, CLI will temporarily swap the global logger during the scan, -// enabling log separation for parallel scans. -func (abp *AuditBasicParams) SetLogger(logger log.Log) *AuditBasicParams { - abp.logger = logger +// SetLogCollector sets a log collector for isolated log capture. +// When set, all logs from this audit will be captured in the collector's buffer, +// enabling parallel audits to have completely isolated logs. +func (abp *AuditBasicParams) SetLogCollector(collector *LogCollector) *AuditBasicParams { + abp.logCollector = collector return abp } -// GetLogger returns the custom logger, or nil if not set. -func (abp *AuditBasicParams) GetLogger() log.Log { - return abp.logger +// GetLogCollector returns the log collector, or nil if not set. 
+func (abp *AuditBasicParams) GetLogCollector() *LogCollector { + return abp.logCollector } diff --git a/commands/audit/logcollector.go b/commands/audit/logcollector.go new file mode 100644 index 000000000..7c076c326 --- /dev/null +++ b/commands/audit/logcollector.go @@ -0,0 +1,57 @@ +package audit + +import ( + "bytes" + + "github.com/jfrog/jfrog-client-go/utils/log" +) + +// LogCollector provides isolated log capture for parallel audit operations. +// Each audit can have its own LogCollector, and all logs from that audit +// will be captured in the collector's buffer without mixing with other audits. +// +// Usage: +// +// collector := NewLogCollector(log.INFO) +// params := NewAuditParams().SetLogCollector(collector) +// results := RunAudit(params) +// logs := collector.GetLogs() // Get all logs from this audit +type LogCollector struct { + buffer *bytes.Buffer + logger log.Log +} + +// NewLogCollector creates a new log collector with the specified log level. +// All logs at or above this level will be captured. +func NewLogCollector(level log.LevelType) *LogCollector { + buf := &bytes.Buffer{} + return &LogCollector{ + buffer: buf, + logger: log.NewBufferedLogger(buf, level), + } +} + +// Logger returns the isolated logger to be used for this audit. +// This logger writes to the collector's internal buffer. +func (c *LogCollector) Logger() log.Log { + return c.logger +} + +// GetLogs returns all captured logs as a string. +// Call this after the audit completes to retrieve the isolated logs. +func (c *LogCollector) GetLogs() string { + return c.buffer.String() +} + +// GetLogsAndClear returns all captured logs and clears the buffer. +// Useful if you want to retrieve logs incrementally. +func (c *LogCollector) GetLogsAndClear() string { + logs := c.buffer.String() + c.buffer.Reset() + return logs +} + +// Clear resets the log buffer, discarding all captured logs. 
+func (c *LogCollector) Clear() { + c.buffer.Reset() +} diff --git a/jas/runner/jasrunner.go b/jas/runner/jasrunner.go index 1766db4c2..0208e8880 100644 --- a/jas/runner/jasrunner.go +++ b/jas/runner/jasrunner.go @@ -129,7 +129,15 @@ func addJasScanTaskForModuleIfNeeded(params JasRunnerParams, subScan utils.SubSc func addModuleJasScanTask(scanType jasutils.JasScanType, securityParallelRunner *utils.SecurityParallelRunner, task parallel.TaskFunc, scanResults *results.TargetResults, allowSkippingErrors bool) (generalError error) { securityParallelRunner.JasScannersWg.Add(1) - if _, addTaskErr := securityParallelRunner.Runner.AddTaskWithError(task, func(err error) { + // Capture the current logger to propagate to child goroutine + currentLogger := log.GetLogger() + wrappedTask := func(threadId int) error { + // Propagate the parent's logger to this child goroutine + log.SetLoggerForGoroutine(currentLogger) + defer log.ClearLoggerForGoroutine() + return task(threadId) + } + if _, addTaskErr := securityParallelRunner.Runner.AddTaskWithError(wrappedTask, func(err error) { _ = scanResults.AddTargetError(fmt.Errorf("failed to run %s scan: %s", scanType, err.Error()), allowSkippingErrors) }); addTaskErr != nil { generalError = scanResults.AddTargetError(fmt.Errorf("error occurred while adding '%s' scan to parallel runner: %s", scanType, addTaskErr.Error()), allowSkippingErrors) diff --git a/sca/scan/scascan.go b/sca/scan/scascan.go index 1cfd7ba5d..e02921956 100644 --- a/sca/scan/scascan.go +++ b/sca/scan/scascan.go @@ -73,8 +73,16 @@ func RunScaScan(strategy SbomScanStrategy, params ScaScanParams) (generalError e // For Audit scans, we run the scan in parallel using the SecurityParallelRunner. func runScaScanWithRunner(strategy SbomScanStrategy, params ScaScanParams) (generalError error) { targetResult := params.ScanResults + // Capture current logger to propagate to child goroutine + currentLogger := log.GetLogger() + scaTask := createScaScanTaskWithRunner(params.Runner, strategy, params) + wrappedScaTask := func(threadId int) error { + log.SetLoggerForGoroutine(currentLogger) + defer log.ClearLoggerForGoroutine() + return scaTask(threadId) + } // Create sca scan task - if _, taskCreationErr := params.Runner.Runner.AddTaskWithError(createScaScanTaskWithRunner(params.Runner, strategy, params), func(err error) { + if _, taskCreationErr := params.Runner.Runner.AddTaskWithError(wrappedScaTask, func(err error) { _ = targetResult.AddTargetError(fmt.Errorf("failed to execute SCA scan: %s", err.Error()), params.AllowPartialResults) }); taskCreationErr != nil { _ = targetResult.AddTargetError(fmt.Errorf("failed to create SCA scan task: %s", taskCreationErr.Error()), params.AllowPartialResults) diff --git a/utils/results/results.go b/utils/results/results.go index 026ac4316..08facadcd 100644 --- a/utils/results/results.go +++ b/utils/results/results.go @@ -830,7 +830,6 @@ func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *Se diffResults.Targets = append(diffResults.Targets, diffTarget) } - log.Info("[DIFF] JAS diff calculation completed with", len(diffResults.Targets), "diff targets") return diffResults } @@ -878,6 +877,16 @@ func applyAmDiffLogicAgainstAll(allTargetJasResults []*JasScansResults, sourceJa log.Debug("[DIFF-AM] Built target fingerprint set with", len(targetKeys), "unique keys") + // Count source results before filtering + sourceSecrets := countSarifResults(sourceJasResults.JasVulnerabilities.SecretsScanResults) + + 
countSarifResults(sourceJasResults.JasViolations.SecretsScanResults) + sourceIac := countSarifResults(sourceJasResults.JasVulnerabilities.IacScanResults) + + countSarifResults(sourceJasResults.JasViolations.IacScanResults) + sourceSast := countSarifResults(sourceJasResults.JasVulnerabilities.SastScanResults) + + countSarifResults(sourceJasResults.JasViolations.SastScanResults) + + log.Debug("[DIFF] Source findings before diff - Secrets:", sourceSecrets, "| IaC:", sourceIac, "| SAST:", sourceSast) + // Filter source results - keep only what's NOT in target filteredJasResults := &JasScansResults{} @@ -897,9 +906,31 @@ func applyAmDiffLogicAgainstAll(allTargetJasResults []*JasScansResults, sourceJa filteredJasResults.JasViolations.SastScanResults = filterSarifRuns( sourceJasResults.JasViolations.SastScanResults, targetKeys) + // Count filtered results after diff + diffSecrets := countSarifResults(filteredJasResults.JasVulnerabilities.SecretsScanResults) + + countSarifResults(filteredJasResults.JasViolations.SecretsScanResults) + diffIac := countSarifResults(filteredJasResults.JasVulnerabilities.IacScanResults) + + countSarifResults(filteredJasResults.JasViolations.IacScanResults) + diffSast := countSarifResults(filteredJasResults.JasVulnerabilities.SastScanResults) + + countSarifResults(filteredJasResults.JasViolations.SastScanResults) + + log.Info("[DIFF] New findings after diff - Secrets:", diffSecrets, "| IaC:", diffIac, "| SAST:", diffSast) + log.Info("[DIFF] Filtered out - Secrets:", sourceSecrets-diffSecrets, "| IaC:", sourceIac-diffIac, "| SAST:", sourceSast-diffSast) + return filteredJasResults } +// countSarifResults counts total results across all SARIF runs +func countSarifResults(runs []*sarif.Run) int { + count := 0 + for _, run := range runs { + if run != nil { + count += len(run.Results) + } + } + return count +} + // extractFingerprints extracts fingerprints from SARIF run (for SAST) func extractFingerprints(run *sarif.Run, targetKeys map[string]bool) { for _, result := range run.Results { From 96ae51a0138fd8de8d8393c2ab0bba6372104345 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Sun, 11 Jan 2026 21:01:11 +0200 Subject: [PATCH 05/18] Update go.mod/go.sum --- go.mod | 6 +++--- go.sum | 6 ++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 3b2015987..cf164439b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/jfrog/jfrog-cli-security -go 1.25.4 +go 1.25.5 require ( github.com/CycloneDX/cyclonedx-go v0.9.3 @@ -11,7 +11,7 @@ require ( github.com/gookit/color v1.6.0 github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/go-plugin v1.6.3 - github.com/jfrog/build-info-go v1.12.5-0.20251209171349-eb030db986f9 + github.com/jfrog/build-info-go v1.13.0 github.com/jfrog/froggit-go v1.20.6 github.com/jfrog/gofrog v1.7.6 github.com/jfrog/jfrog-apps-config v1.0.1 @@ -135,7 +135,7 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect ) -// replace github.com/jfrog/jfrog-client-go => github.com/jfrog/jfrog-client-go master +replace github.com/jfrog/jfrog-client-go => /Users/eyalk/Desktop/team-projects/jfrog-client-go // replace github.com/jfrog/jfrog-cli-core/v2 => github.com/jfrog/jfrog-cli-core/v2 master diff --git a/go.sum b/go.sum index 59c7866cd..22ba2fb2a 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,8 @@ github.com/jedib0t/go-pretty/v6 v6.7.5 h1:9dJSWTJnsXJVVAbvxIFxeHf/JxoJd7GUl5o3Uz github.com/jedib0t/go-pretty/v6 v6.7.5/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU= github.com/jfrog/archiver/v3 v3.6.1 
h1:LOxnkw9pOn45DzCbZNFV6K0+6dCsQ0L8mR3ZcujO5eI= github.com/jfrog/archiver/v3 v3.6.1/go.mod h1:VgR+3WZS4N+i9FaDwLZbq+jeU4B4zctXL+gL4EMzfLw= -github.com/jfrog/build-info-go v1.12.5-0.20251209171349-eb030db986f9 h1:CL7lp7Y7srwQ1vy1btX66t4wbztzEGQbqi/9tdEz7xk= -github.com/jfrog/build-info-go v1.12.5-0.20251209171349-eb030db986f9/go.mod h1:9W4U440fdTHwW1HiB/R0VQvz/5q8ZHsms9MWcq+JrdY= +github.com/jfrog/build-info-go v1.13.0 h1:bHedp1Gl+a8eR71xxP5JvkqwDj2X3r6e5NiIwNcIwRM= +github.com/jfrog/build-info-go v1.13.0/go.mod h1:+OCtMb22/D+u7Wne5lzkjJjaWr0LRZcHlDwTH86Mpwo= github.com/jfrog/froggit-go v1.20.6 h1:Xp7+LlEh0m1KGrQstb+u0aGfjRUtv1eh9xQBV3571jQ= github.com/jfrog/froggit-go v1.20.6/go.mod h1:obSG1SlsWjktkuqmKtpq7MNTTL63e0ot+ucTnlOMV88= github.com/jfrog/gofrog v1.7.6 h1:QmfAiRzVyaI7JYGsB7cxfAJePAZTzFz0gRWZSE27c6s= @@ -158,8 +158,6 @@ github.com/jfrog/jfrog-cli-artifactory v0.8.1-0.20251211075913-35ebcd308e93 h1:r github.com/jfrog/jfrog-cli-artifactory v0.8.1-0.20251211075913-35ebcd308e93/go.mod h1:7cCaRhXorlbyXZgiW5bplCExFxlnROaG21K12d8inpQ= github.com/jfrog/jfrog-cli-core/v2 v2.60.1-0.20251210085744-f8481d179ac5 h1:GYE67ubwl+ZRw3CcXFUi49EwwQp6k+qS8sX0QuHDHO8= github.com/jfrog/jfrog-cli-core/v2 v2.60.1-0.20251210085744-f8481d179ac5/go.mod h1:BMoGi2rG0udCCeaghqlNgiW3fTmT+TNnfTnBoWFYgcg= -github.com/jfrog/jfrog-client-go v1.55.1-0.20251211124639-306f15dbcf29 h1:u+FMai2cImOJExJ1Ehe8JsrpAXmPyRaDXwM60wV3bPA= -github.com/jfrog/jfrog-client-go v1.55.1-0.20251211124639-306f15dbcf29/go.mod h1:WQ5Y+oKYyHFAlCbHN925bWhnShTd2ruxZ6YTpb76fpU= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= From b22d7c4d786749d194774f1be11cf3f8c0980763 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Mon, 12 Jan 2026 18:56:57 +0200 Subject: [PATCH 06/18] Update LogCollector with ReplayTo for proper log formatting --- commands/audit/logcollector.go | 46 ++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/commands/audit/logcollector.go b/commands/audit/logcollector.go index 7c076c326..95e176e8e 100644 --- a/commands/audit/logcollector.go +++ b/commands/audit/logcollector.go @@ -1,57 +1,59 @@ package audit import ( - "bytes" - "github.com/jfrog/jfrog-client-go/utils/log" ) // LogCollector provides isolated log capture for parallel audit operations. // Each audit can have its own LogCollector, and all logs from that audit -// will be captured in the collector's buffer without mixing with other audits. +// will be captured without mixing with other audits. // // Usage: // // collector := NewLogCollector(log.INFO) // params := NewAuditParams().SetLogCollector(collector) // results := RunAudit(params) -// logs := collector.GetLogs() // Get all logs from this audit +// collector.ReplayTo(log.GetLogger()) // Replay logs with proper colors type LogCollector struct { - buffer *bytes.Buffer - logger log.Log + logger *log.BufferedLogger } // NewLogCollector creates a new log collector with the specified log level. // All logs at or above this level will be captured. func NewLogCollector(level log.LevelType) *LogCollector { - buf := &bytes.Buffer{} return &LogCollector{ - buffer: buf, - logger: log.NewBufferedLogger(buf, level), + logger: log.NewBufferedLogger(level), } } // Logger returns the isolated logger to be used for this audit. 
-// This logger writes to the collector's internal buffer. func (c *LogCollector) Logger() log.Log { return c.logger } -// GetLogs returns all captured logs as a string. -// Call this after the audit completes to retrieve the isolated logs. -func (c *LogCollector) GetLogs() string { - return c.buffer.String() +// ReplayTo replays all captured logs through the target logger. +// This preserves colors, formatting, and timestamps from the target logger. +func (c *LogCollector) ReplayTo(target log.Log) { + c.logger.ReplayTo(target) +} + +// HasLogs returns true if any logs have been captured. +func (c *LogCollector) HasLogs() bool { + return c.logger.Len() > 0 +} + +// Len returns the number of captured log entries. +func (c *LogCollector) Len() int { + return c.logger.Len() } -// GetLogsAndClear returns all captured logs and clears the buffer. -// Useful if you want to retrieve logs incrementally. -func (c *LogCollector) GetLogsAndClear() string { - logs := c.buffer.String() - c.buffer.Reset() - return logs +// String returns all captured logs as a plain text string (for debugging). +// For colored output, use ReplayTo() instead. +func (c *LogCollector) String() string { + return c.logger.String() } -// Clear resets the log buffer, discarding all captured logs. +// Clear removes all captured log entries. func (c *LogCollector) Clear() { - c.buffer.Reset() + c.logger.Clear() } From 16f7f21c8d1f10999deac0fb3518a35b212e4573 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Tue, 13 Jan 2026 12:44:35 +0200 Subject: [PATCH 07/18] Simplify comments to match repo style --- commands/audit/audit.go | 4 +--- commands/audit/auditbasicparams.go | 4 ---- commands/audit/logcollector.go | 22 ++-------------------- jas/runner/jasrunner.go | 2 -- sca/scan/scascan.go | 1 - 5 files changed, 3 insertions(+), 30 deletions(-) diff --git a/commands/audit/audit.go b/commands/audit/audit.go index 21aa0a98f..00f8e4ac3 100644 --- a/commands/audit/audit.go +++ b/commands/audit/audit.go @@ -291,8 +291,7 @@ func (auditCmd *AuditCommand) CommandName() string { // Returns an audit Results object containing all the scan results. // If the current server is entitled for JAS, the advanced security results will be included in the scan results. func RunAudit(auditParams *AuditParams) (cmdResults *results.SecurityCommandResults) { - // Set up isolated logging for this audit if a log collector is provided. - // This enables completely isolated log capture for parallel audits. + // Set up isolated logging if a log collector is provided if collector := auditParams.GetLogCollector(); collector != nil { log.SetLoggerForGoroutine(collector.Logger()) defer log.ClearLoggerForGoroutine() @@ -628,7 +627,6 @@ func addJasScansToRunner(auditParallelRunner *utils.SecurityParallelRunner, audi return } auditParallelRunner.JasWg.Add(1) - // Capture current logger to propagate to child goroutine currentLogger := log.GetLogger() jasTask := createJasScansTask(auditParallelRunner, scanResults, serverDetails, auditParams, jasScanner) wrappedJasTask := func(threadId int) error { diff --git a/commands/audit/auditbasicparams.go b/commands/audit/auditbasicparams.go index a6e41f119..f399f9d60 100644 --- a/commands/audit/auditbasicparams.go +++ b/commands/audit/auditbasicparams.go @@ -346,15 +346,11 @@ func (abp *AuditBasicParams) SetSolutionFilePath(solutionFilePath string) *Audit return abp } -// SetLogCollector sets a log collector for isolated log capture. 
-// When set, all logs from this audit will be captured in the collector's buffer, -// enabling parallel audits to have completely isolated logs. func (abp *AuditBasicParams) SetLogCollector(collector *LogCollector) *AuditBasicParams { abp.logCollector = collector return abp } -// GetLogCollector returns the log collector, or nil if not set. func (abp *AuditBasicParams) GetLogCollector() *LogCollector { return abp.logCollector } diff --git a/commands/audit/logcollector.go b/commands/audit/logcollector.go index 95e176e8e..dc3061fb6 100644 --- a/commands/audit/logcollector.go +++ b/commands/audit/logcollector.go @@ -4,56 +4,38 @@ import ( "github.com/jfrog/jfrog-client-go/utils/log" ) -// LogCollector provides isolated log capture for parallel audit operations. -// Each audit can have its own LogCollector, and all logs from that audit -// will be captured without mixing with other audits. -// -// Usage: -// -// collector := NewLogCollector(log.INFO) -// params := NewAuditParams().SetLogCollector(collector) -// results := RunAudit(params) -// collector.ReplayTo(log.GetLogger()) // Replay logs with proper colors +// LogCollector captures logs for isolated parallel audit operations. type LogCollector struct { logger *log.BufferedLogger } -// NewLogCollector creates a new log collector with the specified log level. -// All logs at or above this level will be captured. func NewLogCollector(level log.LevelType) *LogCollector { return &LogCollector{ logger: log.NewBufferedLogger(level), } } -// Logger returns the isolated logger to be used for this audit. func (c *LogCollector) Logger() log.Log { return c.logger } -// ReplayTo replays all captured logs through the target logger. -// This preserves colors, formatting, and timestamps from the target logger. +// ReplayTo outputs captured logs through the target logger (preserving colors). func (c *LogCollector) ReplayTo(target log.Log) { c.logger.ReplayTo(target) } -// HasLogs returns true if any logs have been captured. func (c *LogCollector) HasLogs() bool { return c.logger.Len() > 0 } -// Len returns the number of captured log entries. func (c *LogCollector) Len() int { return c.logger.Len() } -// String returns all captured logs as a plain text string (for debugging). -// For colored output, use ReplayTo() instead. func (c *LogCollector) String() string { return c.logger.String() } -// Clear removes all captured log entries. 
func (c *LogCollector) Clear() { c.logger.Clear() } diff --git a/jas/runner/jasrunner.go b/jas/runner/jasrunner.go index 0208e8880..57cd309f3 100644 --- a/jas/runner/jasrunner.go +++ b/jas/runner/jasrunner.go @@ -129,10 +129,8 @@ func addJasScanTaskForModuleIfNeeded(params JasRunnerParams, subScan utils.SubSc func addModuleJasScanTask(scanType jasutils.JasScanType, securityParallelRunner *utils.SecurityParallelRunner, task parallel.TaskFunc, scanResults *results.TargetResults, allowSkippingErrors bool) (generalError error) { securityParallelRunner.JasScannersWg.Add(1) - // Capture the current logger to propagate to child goroutine currentLogger := log.GetLogger() wrappedTask := func(threadId int) error { - // Propagate the parent's logger to this child goroutine log.SetLoggerForGoroutine(currentLogger) defer log.ClearLoggerForGoroutine() return task(threadId) diff --git a/sca/scan/scascan.go b/sca/scan/scascan.go index e02921956..a8c8fa544 100644 --- a/sca/scan/scascan.go +++ b/sca/scan/scascan.go @@ -73,7 +73,6 @@ func RunScaScan(strategy SbomScanStrategy, params ScaScanParams) (generalError e // For Audit scans, we run the scan in parallel using the SecurityParallelRunner. func runScaScanWithRunner(strategy SbomScanStrategy, params ScaScanParams) (generalError error) { targetResult := params.ScanResults - // Capture current logger to propagate to child goroutine currentLogger := log.GetLogger() scaTask := createScaScanTaskWithRunner(params.Runner, strategy, params) wrappedScaTask := func(threadId int) error { From c38b6c3d85d80b1e52106d636b51d98111140312 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Tue, 13 Jan 2026 12:45:08 +0200 Subject: [PATCH 08/18] Remove accidentally added test file --- .../projects/package-managers/go/curation-project/go.sum | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 tests/testdata/projects/package-managers/go/curation-project/go.sum diff --git a/tests/testdata/projects/package-managers/go/curation-project/go.sum b/tests/testdata/projects/package-managers/go/curation-project/go.sum deleted file mode 100644 index c8bdc71cc..000000000 --- a/tests/testdata/projects/package-managers/go/curation-project/go.sum +++ /dev/null @@ -1,3 +0,0 @@ -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= From 5e4a231fd72c8f1ce457773c35718ac072b31944 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Tue, 13 Jan 2026 12:52:29 +0200 Subject: [PATCH 09/18] Remove unused branchdiff.go --- commands/audit/branchdiff.go | 160 ----------------------------------- 1 file changed, 160 deletions(-) delete mode 100644 commands/audit/branchdiff.go diff --git a/commands/audit/branchdiff.go b/commands/audit/branchdiff.go deleted file mode 100644 index 33e3dfee0..000000000 --- a/commands/audit/branchdiff.go +++ /dev/null @@ -1,160 +0,0 @@ -package audit - -import ( - "github.com/jfrog/jfrog-cli-security/utils/results" - "github.com/jfrog/jfrog-client-go/utils/log" -) - -// BranchDiffParams holds parameters for branch diff scanning -type BranchDiffParams struct { - // Base audit params (used for both target and source) - BaseParams *AuditParams - // Target branch working directory (the branch being merged into, e.g., main) - TargetWorkingDir string - // Source branch working directory (the branch with changes, e.g., feature branch) - SourceWorkingDir string -} - -// 
NewBranchDiffParams creates new branch diff parameters -func NewBranchDiffParams() *BranchDiffParams { - return &BranchDiffParams{ - BaseParams: NewAuditParams(), - } -} - -func (p *BranchDiffParams) SetBaseParams(params *AuditParams) *BranchDiffParams { - p.BaseParams = params - return p -} - -func (p *BranchDiffParams) SetTargetWorkingDir(dir string) *BranchDiffParams { - p.TargetWorkingDir = dir - return p -} - -func (p *BranchDiffParams) SetSourceWorkingDir(dir string) *BranchDiffParams { - p.SourceWorkingDir = dir - return p -} - -// RunBranchDiffAudit runs a diff scan between target and source branches. -// It scans target first, then source, computes the diff, and returns only NEW findings. -// Logs are sequential: all target logs first, then all source logs. -// -// Returns: -// - Unified diffed results containing only new vulnerabilities in source -// - Status codes are merged (worst of target + source) for partial results filtering -func RunBranchDiffAudit(params *BranchDiffParams) (diffResults *results.SecurityCommandResults) { - log.Info("========== BRANCH DIFF SCAN ==========") - log.Info("Target branch:", params.TargetWorkingDir) - log.Info("Source branch:", params.SourceWorkingDir) - log.Info("=======================================") - - // Phase 1: Scan TARGET branch - log.Info("") - log.Info("========== SCANNING TARGET BRANCH ==========") - targetParams := cloneAuditParams(params.BaseParams) - targetParams.SetWorkingDirs([]string{params.TargetWorkingDir}) - targetParams.SetDiffMode(true) - targetParams.SetResultsToCompare(nil) // No comparison for target - we're collecting baseline - targetParams.SetUploadCdxResults(false) // Don't upload intermediate results - - targetResults := RunAudit(targetParams) - - if targetResults.GeneralError != nil { - log.Error("Target branch scan failed:", targetResults.GeneralError) - return targetResults - } - log.Info("========== TARGET BRANCH SCAN COMPLETE ==========") - log.Info("") - - // Phase 2: Scan SOURCE branch - log.Info("========== SCANNING SOURCE BRANCH ==========") - sourceParams := cloneAuditParams(params.BaseParams) - sourceParams.SetWorkingDirs([]string{params.SourceWorkingDir}) - sourceParams.SetDiffMode(true) - sourceParams.SetResultsToCompare(targetResults) // SCA will use this for internal diff - sourceParams.SetUploadCdxResults(false) // Don't upload intermediate results - - sourceResults := RunAudit(sourceParams) - - if sourceResults.GeneralError != nil { - log.Error("Source branch scan failed:", sourceResults.GeneralError) - return sourceResults - } - log.Info("========== SOURCE BRANCH SCAN COMPLETE ==========") - log.Info("") - - // Phase 3: Compute JAS diff (SCA diff is already done internally) - log.Info("========== COMPUTING DIFF ==========") - jasDiffResults := results.CompareJasResults(targetResults, sourceResults) - - // Phase 4: Unify SCA (already diffed) + JAS (just diffed) results - diffResults = results.UnifyScaAndJasResults(sourceResults, jasDiffResults) - - // Phase 5: Merge status codes from both target and source - // This ensures partial results filtering works correctly - if ANY scan failed - // on either branch, the corresponding scanner results should be filtered - targetStatus := targetResults.GetStatusCodes() - sourceStatus := sourceResults.GetStatusCodes() - mergedStatus := results.MergeStatusCodes(targetStatus, sourceStatus) - - // Apply merged status to all targets in diff results - for _, target := range diffResults.Targets { - target.ResultsStatus = mergedStatus - } - - // Copy violation 
status code if set - if targetResults.ViolationsStatusCode != nil || sourceResults.ViolationsStatusCode != nil { - mergedViolationStatus := mergeViolationStatus(targetResults.ViolationsStatusCode, sourceResults.ViolationsStatusCode) - diffResults.ViolationsStatusCode = mergedViolationStatus - } - - log.Info("========== DIFF COMPLETE ==========") - log.Info("Diff results: ", len(diffResults.Targets), " targets with new findings") - - return diffResults -} - -// cloneAuditParams creates a shallow copy of AuditParams for independent configuration -func cloneAuditParams(params *AuditParams) *AuditParams { - cloned := NewAuditParams() - - // Copy basic params - cloned.AuditBasicParams = params.AuditBasicParams - - // Copy all other fields - cloned.SetResultsContext(params.resultsContext) - cloned.SetGitContext(params.gitContext) - cloned.SetBomGenerator(params.bomGenerator) - cloned.SetCustomBomGenBinaryPath(params.customBomGenBinaryPath) - cloned.SetCustomAnalyzerManagerBinaryPath(params.customAnalyzerManagerBinaryPath) - cloned.SetSastRules(params.sastRules) - cloned.SetScaScanStrategy(params.scaScanStrategy) - cloned.SetStartTime(params.startTime) - cloned.SetMultiScanId(params.multiScanId) - cloned.SetMinSeverityFilter(params.minSeverityFilter) - cloned.SetFixableOnly(params.fixableOnly) - cloned.SetThirdPartyApplicabilityScan(params.thirdPartyApplicabilityScan) - cloned.SetThreads(params.threads) - cloned.SetScansResultsOutputDir(params.scanResultsOutputDir) - cloned.SetViolationGenerator(params.violationGenerator) - cloned.SetAllowedLicenses(params.allowedLicenses) - cloned.SetRtResultRepository(params.rtResultRepository) - - return cloned -} - -// mergeViolationStatus returns the worst (non-zero) violation status -func mergeViolationStatus(a, b *int) *int { - if a == nil { - return b - } - if b == nil { - return a - } - if *a != 0 { - return a - } - return b -} From 86e70fd64f2b2d9c141ccb015fc986193d8f774f Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Tue, 13 Jan 2026 12:54:04 +0200 Subject: [PATCH 10/18] Clean up diff functions: remove AM references, simplify comments --- utils/results/results.go | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/utils/results/results.go b/utils/results/results.go index 08facadcd..858be7cea 100644 --- a/utils/results/results.go +++ b/utils/results/results.go @@ -734,12 +734,7 @@ func (jsr *JasScansResults) HasInformationByType(scanType jasutils.JasScanType) return false } -// ============================================================================= -// Parallel PR Scanning Functions -// These functions support running SCA and JAS scans in parallel for PR scanning -// ============================================================================= - -// UnifyScaAndJasResults merges SCA results and JAS diff results into a single SecurityCommandResults +// UnifyScaAndJasResults merges SCA and JAS diff results into a single SecurityCommandResults. func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) *SecurityCommandResults { // Create unified results based on JAS diff structure unifiedResults := &SecurityCommandResults{ @@ -785,8 +780,8 @@ func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) * return unifiedResults } -// CompareJasResults computes the diff between target and source JAS results -// Returns only the NEW findings in source that don't exist in target +// CompareJasResults computes the diff between target and source JAS results. 
+// Returns only NEW findings in source that don't exist in target. func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *SecurityCommandResults { log.Info("[DIFF] Starting JAS diff calculation") log.Debug("[DIFF] Comparing", len(sourceResults.Targets), "source targets against", len(targetResults.Targets), "target targets") @@ -819,8 +814,7 @@ func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *Se } } - // Apply diff logic against all collected targets - diffJasResults := applyAmDiffLogicAgainstAll(allTargetJasResults, sourceTarget.JasResults) + diffJasResults := filterExistingFindings(allTargetJasResults, sourceTarget.JasResults) diffTarget := &TargetResults{ ScanTarget: sourceTarget.ScanTarget, @@ -833,8 +827,8 @@ func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *Se return diffResults } -// applyAmDiffLogicAgainstAll applies the Analyzer Manager's diff logic against all target results -func applyAmDiffLogicAgainstAll(allTargetJasResults []*JasScansResults, sourceJasResults *JasScansResults) *JasScansResults { +// filterExistingFindings removes findings from source that already exist in target. +func filterExistingFindings(allTargetJasResults []*JasScansResults, sourceJasResults *JasScansResults) *JasScansResults { if sourceJasResults == nil { return nil } @@ -875,7 +869,7 @@ func applyAmDiffLogicAgainstAll(allTargetJasResults []*JasScansResults, sourceJa } } - log.Debug("[DIFF-AM] Built target fingerprint set with", len(targetKeys), "unique keys") + log.Debug("[DIFF] Built target fingerprint set with", len(targetKeys), "unique keys") // Count source results before filtering sourceSecrets := countSarifResults(sourceJasResults.JasVulnerabilities.SecretsScanResults) + From 37f3743da0e8d396148769d60b52eaae2f707ab4 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Tue, 13 Jan 2026 12:58:35 +0200 Subject: [PATCH 11/18] Move diff functions to separate file: utils/results/diff.go --- utils/results/diff.go | 326 ++++++++++++++++++++++++++++++++++++ utils/results/results.go | 350 --------------------------------------- 2 files changed, 326 insertions(+), 350 deletions(-) create mode 100644 utils/results/diff.go diff --git a/utils/results/diff.go b/utils/results/diff.go new file mode 100644 index 000000000..7f735d0e4 --- /dev/null +++ b/utils/results/diff.go @@ -0,0 +1,326 @@ +package results + +import ( + "path/filepath" + "strings" + + "github.com/jfrog/jfrog-cli-security/utils/jasutils" + "github.com/jfrog/jfrog-client-go/utils/log" + "github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif" +) + +// UnifyScaAndJasResults merges SCA and JAS diff results into a single SecurityCommandResults. 
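+// Metadata is copied from the JAS diff results. Targets are matched by Target path;
+// SCA targets without a matching JAS target are kept with nil JasResults.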
+func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) *SecurityCommandResults { + unifiedResults := &SecurityCommandResults{ + ResultsMetaData: ResultsMetaData{ + EntitledForJas: jasDiffResults.EntitledForJas, + SecretValidation: jasDiffResults.SecretValidation, + CmdType: jasDiffResults.CmdType, + XrayVersion: jasDiffResults.XrayVersion, + XscVersion: jasDiffResults.XscVersion, + MultiScanId: jasDiffResults.MultiScanId, + StartTime: jasDiffResults.StartTime, + ResultContext: jasDiffResults.ResultContext, + }, + } + + for _, scaTarget := range scaResults.Targets { + var jasTarget *TargetResults + for _, jTarget := range jasDiffResults.Targets { + if jTarget.Target == scaTarget.Target { + jasTarget = jTarget + break + } + } + + unifiedTarget := &TargetResults{ + ScanTarget: scaTarget.ScanTarget, + AppsConfigModule: scaTarget.AppsConfigModule, + ScaResults: scaTarget.ScaResults, + JasResults: nil, + } + + if jasTarget != nil { + unifiedTarget.JasResults = jasTarget.JasResults + } + + unifiedResults.Targets = append(unifiedResults.Targets, unifiedTarget) + } + + return unifiedResults +} + +// CompareJasResults computes the diff between target and source JAS results. +// Returns only NEW findings in source that don't exist in target. +func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *SecurityCommandResults { + log.Info("[DIFF] Starting JAS diff calculation") + log.Debug("[DIFF] Comparing", len(sourceResults.Targets), "source targets against", len(targetResults.Targets), "target targets") + + diffResults := &SecurityCommandResults{ + ResultsMetaData: ResultsMetaData{ + EntitledForJas: sourceResults.EntitledForJas, + SecretValidation: sourceResults.SecretValidation, + CmdType: sourceResults.CmdType, + XrayVersion: sourceResults.XrayVersion, + XscVersion: sourceResults.XscVersion, + MultiScanId: sourceResults.MultiScanId, + StartTime: sourceResults.StartTime, + ResultContext: sourceResults.ResultContext, + }, + } + + for _, sourceTarget := range sourceResults.Targets { + if sourceTarget.JasResults == nil { + continue + } + + var allTargetJasResults []*JasScansResults + for _, targetTarget := range targetResults.Targets { + if targetTarget.JasResults != nil { + allTargetJasResults = append(allTargetJasResults, targetTarget.JasResults) + } + } + + diffJasResults := filterExistingFindings(allTargetJasResults, sourceTarget.JasResults) + + diffTarget := &TargetResults{ + ScanTarget: sourceTarget.ScanTarget, + JasResults: diffJasResults, + } + + diffResults.Targets = append(diffResults.Targets, diffTarget) + } + + return diffResults +} + +// filterExistingFindings removes findings from source that already exist in target. 
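+// Target findings are keyed by relative file path plus snippet text for Secrets and IaC,
+// and by SAST fingerprint (falling back to location keys when no fingerprint is present).
+// When there are no target results, the source results are returned unchanged - everything is new.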
+func filterExistingFindings(allTargetJasResults []*JasScansResults, sourceJasResults *JasScansResults) *JasScansResults { + if sourceJasResults == nil { + return nil + } + + if len(allTargetJasResults) == 0 { + return sourceJasResults + } + + targetKeys := make(map[string]bool) + + for _, targetJasResults := range allTargetJasResults { + if targetJasResults == nil { + continue + } + + for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.Secrets) { + extractLocationsOnly(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.Secrets) { + extractLocationsOnly(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.IaC) { + extractLocationsOnly(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.IaC) { + extractLocationsOnly(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.Sast) { + extractFingerprints(targetRun, targetKeys) + } + for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.Sast) { + extractFingerprints(targetRun, targetKeys) + } + } + + log.Debug("[DIFF] Built target fingerprint set with", len(targetKeys), "unique keys") + + sourceSecrets := countSarifResults(sourceJasResults.JasVulnerabilities.SecretsScanResults) + + countSarifResults(sourceJasResults.JasViolations.SecretsScanResults) + sourceIac := countSarifResults(sourceJasResults.JasVulnerabilities.IacScanResults) + + countSarifResults(sourceJasResults.JasViolations.IacScanResults) + sourceSast := countSarifResults(sourceJasResults.JasVulnerabilities.SastScanResults) + + countSarifResults(sourceJasResults.JasViolations.SastScanResults) + + log.Debug("[DIFF] Source findings before diff - Secrets:", sourceSecrets, "| IaC:", sourceIac, "| SAST:", sourceSast) + + filteredJasResults := &JasScansResults{} + + filteredJasResults.JasVulnerabilities.SecretsScanResults = filterSarifRuns( + sourceJasResults.JasVulnerabilities.SecretsScanResults, targetKeys) + filteredJasResults.JasVulnerabilities.IacScanResults = filterSarifRuns( + sourceJasResults.JasVulnerabilities.IacScanResults, targetKeys) + filteredJasResults.JasVulnerabilities.SastScanResults = filterSarifRuns( + sourceJasResults.JasVulnerabilities.SastScanResults, targetKeys) + + filteredJasResults.JasViolations.SecretsScanResults = filterSarifRuns( + sourceJasResults.JasViolations.SecretsScanResults, targetKeys) + filteredJasResults.JasViolations.IacScanResults = filterSarifRuns( + sourceJasResults.JasViolations.IacScanResults, targetKeys) + filteredJasResults.JasViolations.SastScanResults = filterSarifRuns( + sourceJasResults.JasViolations.SastScanResults, targetKeys) + + diffSecrets := countSarifResults(filteredJasResults.JasVulnerabilities.SecretsScanResults) + + countSarifResults(filteredJasResults.JasViolations.SecretsScanResults) + diffIac := countSarifResults(filteredJasResults.JasVulnerabilities.IacScanResults) + + countSarifResults(filteredJasResults.JasViolations.IacScanResults) + diffSast := countSarifResults(filteredJasResults.JasVulnerabilities.SastScanResults) + + countSarifResults(filteredJasResults.JasViolations.SastScanResults) + + log.Info("[DIFF] New findings after diff - Secrets:", diffSecrets, "| IaC:", diffIac, "| SAST:", diffSast) + log.Info("[DIFF] Filtered out - Secrets:", sourceSecrets-diffSecrets, "| IaC:", sourceIac-diffIac, "| SAST:", sourceSast-diffSast) + + return filteredJasResults +} + +func 
countSarifResults(runs []*sarif.Run) int { + count := 0 + for _, run := range runs { + if run != nil { + count += len(run.Results) + } + } + return count +} + +func extractFingerprints(run *sarif.Run, targetKeys map[string]bool) { + for _, result := range run.Results { + if result.Fingerprints != nil { + key := getResultFingerprint(result) + if key != "" { + targetKeys[key] = true + } + } else { + for _, location := range result.Locations { + key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + targetKeys[key] = true + } + } + } +} + +func extractLocationsOnly(run *sarif.Run, targetKeys map[string]bool) { + for _, result := range run.Results { + for _, location := range result.Locations { + key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + targetKeys[key] = true + } + } +} + +func getResultFingerprint(result *sarif.Result) string { + if result.Fingerprints != nil { + if value, ok := result.Fingerprints["precise_sink_and_sink_function"]; ok { + return value + } + } + return "" +} + +func getLocationSnippetText(location *sarif.Location) string { + if location.PhysicalLocation != nil && location.PhysicalLocation.Region != nil && + location.PhysicalLocation.Region.Snippet != nil && location.PhysicalLocation.Region.Snippet.Text != nil { + return *location.PhysicalLocation.Region.Snippet.Text + } + return "" +} + +func getRelativeLocationFileName(location *sarif.Location, invocations []*sarif.Invocation) string { + wd := "" + if len(invocations) > 0 { + wd = getInvocationWorkingDirectory(invocations[0]) + } + filePath := getLocationFileName(location) + if filePath != "" { + return extractRelativePath(filePath, wd) + } + return "" +} + +func getInvocationWorkingDirectory(invocation *sarif.Invocation) string { + if invocation != nil && invocation.WorkingDirectory != nil && invocation.WorkingDirectory.URI != nil { + return *invocation.WorkingDirectory.URI + } + return "" +} + +func getLocationFileName(location *sarif.Location) string { + if location != nil && location.PhysicalLocation != nil && location.PhysicalLocation.ArtifactLocation != nil && location.PhysicalLocation.ArtifactLocation.URI != nil { + return *location.PhysicalLocation.ArtifactLocation.URI + } + return "" +} + +func extractRelativePath(resultPath string, projectRoot string) string { + resultPath = strings.TrimPrefix(resultPath, "file:///private") + resultPath = strings.TrimPrefix(resultPath, "file:///") + projectRoot = strings.TrimPrefix(projectRoot, "file:///private") + projectRoot = strings.TrimPrefix(projectRoot, "file:///") + projectRoot = strings.TrimPrefix(projectRoot, "/") + + relativePath := strings.ReplaceAll(resultPath, projectRoot, "") + trimSlash := strings.TrimPrefix(relativePath, string(filepath.Separator)) + return strings.TrimPrefix(trimSlash, "/") +} + +// MergeStatusCodes merges two ResultsStatus, taking the worst (non-zero) status for each scanner. 
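+// A nil code means that scan did not run on that side; otherwise the first non-zero (failed) code wins,
+// so a failure on either branch is reflected in the merged status.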
+func MergeStatusCodes(target, source ResultsStatus) ResultsStatus { + merged := ResultsStatus{} + merged.SbomScanStatusCode = mergeStatusCode(target.SbomScanStatusCode, source.SbomScanStatusCode) + merged.ScaScanStatusCode = mergeStatusCode(target.ScaScanStatusCode, source.ScaScanStatusCode) + merged.ContextualAnalysisStatusCode = mergeStatusCode(target.ContextualAnalysisStatusCode, source.ContextualAnalysisStatusCode) + merged.SecretsScanStatusCode = mergeStatusCode(target.SecretsScanStatusCode, source.SecretsScanStatusCode) + merged.IacScanStatusCode = mergeStatusCode(target.IacScanStatusCode, source.IacScanStatusCode) + merged.SastScanStatusCode = mergeStatusCode(target.SastScanStatusCode, source.SastScanStatusCode) + merged.ViolationsStatusCode = mergeStatusCode(target.ViolationsStatusCode, source.ViolationsStatusCode) + return merged +} + +func mergeStatusCode(a, b *int) *int { + if a == nil { + return b + } + if b == nil { + return a + } + if *a != 0 { + return a + } + return b +} + +func filterSarifRuns(sourceRuns []*sarif.Run, targetKeys map[string]bool) []*sarif.Run { + var filteredRuns []*sarif.Run + + for _, run := range sourceRuns { + var filteredResults []*sarif.Result + + for _, result := range run.Results { + if result.Fingerprints != nil { + if !targetKeys[getResultFingerprint(result)] { + filteredResults = append(filteredResults, result) + } + } else { + var filteredLocations []*sarif.Location + for _, location := range result.Locations { + key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + if !targetKeys[key] { + filteredLocations = append(filteredLocations, location) + } + } + + if len(filteredLocations) > 0 { + newResult := *result + newResult.Locations = filteredLocations + filteredResults = append(filteredResults, &newResult) + } + } + } + + if len(filteredResults) > 0 { + filteredRun := *run + filteredRun.Results = filteredResults + filteredRuns = append(filteredRuns, &filteredRun) + } + } + + return filteredRuns +} diff --git a/utils/results/results.go b/utils/results/results.go index 858be7cea..4e6baedce 100644 --- a/utils/results/results.go +++ b/utils/results/results.go @@ -3,8 +3,6 @@ package results import ( "errors" "fmt" - "path/filepath" - "strings" "sync" "time" @@ -733,351 +731,3 @@ func (jsr *JasScansResults) HasInformationByType(scanType jasutils.JasScanType) } return false } - -// UnifyScaAndJasResults merges SCA and JAS diff results into a single SecurityCommandResults. 
-func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) *SecurityCommandResults { - // Create unified results based on JAS diff structure - unifiedResults := &SecurityCommandResults{ - ResultsMetaData: ResultsMetaData{ - EntitledForJas: jasDiffResults.EntitledForJas, - SecretValidation: jasDiffResults.SecretValidation, - CmdType: jasDiffResults.CmdType, - XrayVersion: jasDiffResults.XrayVersion, - XscVersion: jasDiffResults.XscVersion, - MultiScanId: jasDiffResults.MultiScanId, - StartTime: jasDiffResults.StartTime, - ResultContext: jasDiffResults.ResultContext, - }, - } - - // Merge targets from both SCA and JAS results - for _, scaTarget := range scaResults.Targets { - // Find corresponding JAS target - var jasTarget *TargetResults - for _, jTarget := range jasDiffResults.Targets { - if jTarget.Target == scaTarget.Target { - jasTarget = jTarget - break - } - } - - // Create unified target with both SCA and JAS results - unifiedTarget := &TargetResults{ - ScanTarget: scaTarget.ScanTarget, - AppsConfigModule: scaTarget.AppsConfigModule, - ScaResults: scaTarget.ScaResults, - JasResults: nil, - } - - // Add JAS diff results if available - if jasTarget != nil { - unifiedTarget.JasResults = jasTarget.JasResults - } - - unifiedResults.Targets = append(unifiedResults.Targets, unifiedTarget) - } - - return unifiedResults -} - -// CompareJasResults computes the diff between target and source JAS results. -// Returns only NEW findings in source that don't exist in target. -func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *SecurityCommandResults { - log.Info("[DIFF] Starting JAS diff calculation") - log.Debug("[DIFF] Comparing", len(sourceResults.Targets), "source targets against", len(targetResults.Targets), "target targets") - - // Create diff results based on source structure - diffResults := &SecurityCommandResults{ - ResultsMetaData: ResultsMetaData{ - EntitledForJas: sourceResults.EntitledForJas, - SecretValidation: sourceResults.SecretValidation, - CmdType: sourceResults.CmdType, - XrayVersion: sourceResults.XrayVersion, - XscVersion: sourceResults.XscVersion, - MultiScanId: sourceResults.MultiScanId, - StartTime: sourceResults.StartTime, - ResultContext: sourceResults.ResultContext, - }, - } - - // Compare each source target against ALL target targets - for _, sourceTarget := range sourceResults.Targets { - if sourceTarget.JasResults == nil { - continue - } - - // Collect ALL target JAS results to compare against - var allTargetJasResults []*JasScansResults - for _, targetTarget := range targetResults.Targets { - if targetTarget.JasResults != nil { - allTargetJasResults = append(allTargetJasResults, targetTarget.JasResults) - } - } - - diffJasResults := filterExistingFindings(allTargetJasResults, sourceTarget.JasResults) - - diffTarget := &TargetResults{ - ScanTarget: sourceTarget.ScanTarget, - JasResults: diffJasResults, - } - - diffResults.Targets = append(diffResults.Targets, diffTarget) - } - - return diffResults -} - -// filterExistingFindings removes findings from source that already exist in target. 
-func filterExistingFindings(allTargetJasResults []*JasScansResults, sourceJasResults *JasScansResults) *JasScansResults { - if sourceJasResults == nil { - return nil - } - - // If no target results, return all source results (everything is new) - if len(allTargetJasResults) == 0 { - return sourceJasResults - } - - // Build target fingerprint set from ALL target results - targetKeys := make(map[string]bool) - - for _, targetJasResults := range allTargetJasResults { - if targetJasResults == nil { - continue - } - - // Extract from target secrets results (location-based) - for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.Secrets) { - extractLocationsOnly(targetRun, targetKeys) - } - for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.Secrets) { - extractLocationsOnly(targetRun, targetKeys) - } - // Extract from target IaC results (location-based) - for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.IaC) { - extractLocationsOnly(targetRun, targetKeys) - } - for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.IaC) { - extractLocationsOnly(targetRun, targetKeys) - } - // Extract from target SAST results (fingerprint-based) - for _, targetRun := range targetJasResults.GetVulnerabilitiesResults(jasutils.Sast) { - extractFingerprints(targetRun, targetKeys) - } - for _, targetRun := range targetJasResults.GetViolationsResults(jasutils.Sast) { - extractFingerprints(targetRun, targetKeys) - } - } - - log.Debug("[DIFF] Built target fingerprint set with", len(targetKeys), "unique keys") - - // Count source results before filtering - sourceSecrets := countSarifResults(sourceJasResults.JasVulnerabilities.SecretsScanResults) + - countSarifResults(sourceJasResults.JasViolations.SecretsScanResults) - sourceIac := countSarifResults(sourceJasResults.JasVulnerabilities.IacScanResults) + - countSarifResults(sourceJasResults.JasViolations.IacScanResults) - sourceSast := countSarifResults(sourceJasResults.JasVulnerabilities.SastScanResults) + - countSarifResults(sourceJasResults.JasViolations.SastScanResults) - - log.Debug("[DIFF] Source findings before diff - Secrets:", sourceSecrets, "| IaC:", sourceIac, "| SAST:", sourceSast) - - // Filter source results - keep only what's NOT in target - filteredJasResults := &JasScansResults{} - - // Filter vulnerabilities - filteredJasResults.JasVulnerabilities.SecretsScanResults = filterSarifRuns( - sourceJasResults.JasVulnerabilities.SecretsScanResults, targetKeys) - filteredJasResults.JasVulnerabilities.IacScanResults = filterSarifRuns( - sourceJasResults.JasVulnerabilities.IacScanResults, targetKeys) - filteredJasResults.JasVulnerabilities.SastScanResults = filterSarifRuns( - sourceJasResults.JasVulnerabilities.SastScanResults, targetKeys) - - // Filter violations - filteredJasResults.JasViolations.SecretsScanResults = filterSarifRuns( - sourceJasResults.JasViolations.SecretsScanResults, targetKeys) - filteredJasResults.JasViolations.IacScanResults = filterSarifRuns( - sourceJasResults.JasViolations.IacScanResults, targetKeys) - filteredJasResults.JasViolations.SastScanResults = filterSarifRuns( - sourceJasResults.JasViolations.SastScanResults, targetKeys) - - // Count filtered results after diff - diffSecrets := countSarifResults(filteredJasResults.JasVulnerabilities.SecretsScanResults) + - countSarifResults(filteredJasResults.JasViolations.SecretsScanResults) - diffIac := countSarifResults(filteredJasResults.JasVulnerabilities.IacScanResults) + - 
countSarifResults(filteredJasResults.JasViolations.IacScanResults) - diffSast := countSarifResults(filteredJasResults.JasVulnerabilities.SastScanResults) + - countSarifResults(filteredJasResults.JasViolations.SastScanResults) - - log.Info("[DIFF] New findings after diff - Secrets:", diffSecrets, "| IaC:", diffIac, "| SAST:", diffSast) - log.Info("[DIFF] Filtered out - Secrets:", sourceSecrets-diffSecrets, "| IaC:", sourceIac-diffIac, "| SAST:", sourceSast-diffSast) - - return filteredJasResults -} - -// countSarifResults counts total results across all SARIF runs -func countSarifResults(runs []*sarif.Run) int { - count := 0 - for _, run := range runs { - if run != nil { - count += len(run.Results) - } - } - return count -} - -// extractFingerprints extracts fingerprints from SARIF run (for SAST) -func extractFingerprints(run *sarif.Run, targetKeys map[string]bool) { - for _, result := range run.Results { - if result.Fingerprints != nil { - key := getResultFingerprint(result) - if key != "" { - targetKeys[key] = true - } - } else { - for _, location := range result.Locations { - key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) - targetKeys[key] = true - } - } - } -} - -// extractLocationsOnly extracts locations (for Secrets and IaC - no fingerprints) -func extractLocationsOnly(run *sarif.Run, targetKeys map[string]bool) { - for _, result := range run.Results { - for _, location := range result.Locations { - key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) - targetKeys[key] = true - } - } -} - -// getResultFingerprint returns the SAST fingerprint from a result -func getResultFingerprint(result *sarif.Result) string { - if result.Fingerprints != nil { - if value, ok := result.Fingerprints["precise_sink_and_sink_function"]; ok { - return value - } - } - return "" -} - -// getLocationSnippetText returns the snippet text from a location -func getLocationSnippetText(location *sarif.Location) string { - if location.PhysicalLocation != nil && location.PhysicalLocation.Region != nil && - location.PhysicalLocation.Region.Snippet != nil && location.PhysicalLocation.Region.Snippet.Text != nil { - return *location.PhysicalLocation.Region.Snippet.Text - } - return "" -} - -// getRelativeLocationFileName returns the relative file path from a location -func getRelativeLocationFileName(location *sarif.Location, invocations []*sarif.Invocation) string { - wd := "" - if len(invocations) > 0 { - wd = getInvocationWorkingDirectory(invocations[0]) - } - filePath := getLocationFileName(location) - if filePath != "" { - return extractRelativePath(filePath, wd) - } - return "" -} - -func getInvocationWorkingDirectory(invocation *sarif.Invocation) string { - if invocation != nil && invocation.WorkingDirectory != nil && invocation.WorkingDirectory.URI != nil { - return *invocation.WorkingDirectory.URI - } - return "" -} - -func getLocationFileName(location *sarif.Location) string { - if location != nil && location.PhysicalLocation != nil && location.PhysicalLocation.ArtifactLocation != nil && location.PhysicalLocation.ArtifactLocation.URI != nil { - return *location.PhysicalLocation.ArtifactLocation.URI - } - return "" -} - -func extractRelativePath(resultPath string, projectRoot string) string { - // Remove OS-specific file prefix - resultPath = strings.TrimPrefix(resultPath, "file:///private") - resultPath = strings.TrimPrefix(resultPath, "file:///") - projectRoot = strings.TrimPrefix(projectRoot, "file:///private") - 
projectRoot = strings.TrimPrefix(projectRoot, "file:///") - projectRoot = strings.TrimPrefix(projectRoot, "/") - - // Get relative path (removes temp directory) - relativePath := strings.ReplaceAll(resultPath, projectRoot, "") - trimSlash := strings.TrimPrefix(relativePath, string(filepath.Separator)) - return strings.TrimPrefix(trimSlash, "/") -} - -// MergeStatusCodes merges two ResultsStatus structs, taking the worst (non-zero) status for each scanner -// This is used when combining target and source results to ensure partial results filtering works correctly -func MergeStatusCodes(target, source ResultsStatus) ResultsStatus { - merged := ResultsStatus{} - merged.SbomScanStatusCode = mergeStatusCode(target.SbomScanStatusCode, source.SbomScanStatusCode) - merged.ScaScanStatusCode = mergeStatusCode(target.ScaScanStatusCode, source.ScaScanStatusCode) - merged.ContextualAnalysisStatusCode = mergeStatusCode(target.ContextualAnalysisStatusCode, source.ContextualAnalysisStatusCode) - merged.SecretsScanStatusCode = mergeStatusCode(target.SecretsScanStatusCode, source.SecretsScanStatusCode) - merged.IacScanStatusCode = mergeStatusCode(target.IacScanStatusCode, source.IacScanStatusCode) - merged.SastScanStatusCode = mergeStatusCode(target.SastScanStatusCode, source.SastScanStatusCode) - merged.ViolationsStatusCode = mergeStatusCode(target.ViolationsStatusCode, source.ViolationsStatusCode) - return merged -} - -// mergeStatusCode returns the worst (non-zero) status code between two -func mergeStatusCode(a, b *int) *int { - if a == nil { - return b - } - if b == nil { - return a - } - // Return the non-zero value (failed status), or zero if both succeeded - if *a != 0 { - return a - } - return b -} - -// filterSarifRuns filters SARIF runs, keeping only results that are NOT in target -func filterSarifRuns(sourceRuns []*sarif.Run, targetKeys map[string]bool) []*sarif.Run { - var filteredRuns []*sarif.Run - - for _, run := range sourceRuns { - var filteredResults []*sarif.Result - - for _, result := range run.Results { - if result.Fingerprints != nil { - // Use fingerprint for matching (SAST) - if !targetKeys[getResultFingerprint(result)] { - filteredResults = append(filteredResults, result) - } - } else { - // Use location for matching (Secrets, IaC) - var filteredLocations []*sarif.Location - for _, location := range result.Locations { - key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) - if !targetKeys[key] { - filteredLocations = append(filteredLocations, location) - } - } - - if len(filteredLocations) > 0 { - newResult := *result - newResult.Locations = filteredLocations - filteredResults = append(filteredResults, &newResult) - } - } - } - - if len(filteredResults) > 0 { - filteredRun := *run - filteredRun.Results = filteredResults - filteredRuns = append(filteredRuns, &filteredRun) - } - } - - return filteredRuns -} From b0eb167f7a37669de0774e014b9bdc01c743a5a1 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Tue, 13 Jan 2026 13:02:53 +0200 Subject: [PATCH 12/18] Remove extra comments from logCollector field --- commands/audit/auditbasicparams.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/commands/audit/auditbasicparams.go b/commands/audit/auditbasicparams.go index f399f9d60..a4d7f75ff 100644 --- a/commands/audit/auditbasicparams.go +++ b/commands/audit/auditbasicparams.go @@ -82,8 +82,6 @@ type AuditBasicParams struct { xscVersion string configProfile *xscservices.ConfigProfile solutionFilePath string - // logCollector provides isolated log 
capture for this audit. - // When set, all logs from this audit are captured in the collector's buffer. logCollector *LogCollector } From 957b363228bbeca3ee8ae457b453562e20987125 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Tue, 13 Jan 2026 13:14:13 +0200 Subject: [PATCH 13/18] Add diff function tests adapted from analyzer-manager --- tests/testdata/other/diff-scan/results.sarif | 142 ++++ tests/testdata/other/diff-scan/target.sarif | 1 + utils/results/diff_test.go | 762 +++++++++++++++++++ 3 files changed, 905 insertions(+) create mode 100644 tests/testdata/other/diff-scan/results.sarif create mode 100644 tests/testdata/other/diff-scan/target.sarif create mode 100644 utils/results/diff_test.go diff --git a/tests/testdata/other/diff-scan/results.sarif b/tests/testdata/other/diff-scan/results.sarif new file mode 100644 index 000000000..3375e9301 --- /dev/null +++ b/tests/testdata/other/diff-scan/results.sarif @@ -0,0 +1,142 @@ +{ + "runs": [ + { + "tool": { + "driver": { + "name": "JFrog Secrets scanner", + "rules": [ + { + "id": "REQ.SECRET.GENERIC.TEXT", + "properties": { + "conclusion": "negative", + "applicability": "applicable", + "scanner_id": null + }, + "fullDescription": { + "text": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n", + "markdown": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n" + }, + "shortDescription": { + "text": "Scanner for REQ.SECRET.GENERIC.TEXT" + } + }, + { + "id": "REQ.SECRET.GENERIC.CODE", + "properties": { + "conclusion": "private", + "applicability": "undetermined", + "scanner_id": null + }, + "fullDescription": { + "text": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n", + "markdown": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n" + }, + "shortDescription": { + "text": "Scanner for REQ.SECRET.GENERIC.CODE" + } + }, + { + "id": "REQ.SECRET.KEYS", + "properties": { + "conclusion": "private", + "applicability": "undetermined", + "scanner_id": "1235", + "undetermined_reason": "" + }, + "fullDescription": { + "text": "\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n", + "markdown": "\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. 
For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n" + }, + "shortDescription": { + "text": "Scanner for REQ.SECRET.KEYS" + } + }, + { + "id": "REQ.CRED.PUBLIC-ONLY", + "properties": { + "conclusion": "private", + "applicability": "undetermined", + "scanner_id": "125", + "undetermined_reason": "" + }, + "fullDescription": { + "text": "", + "markdown": "" + }, + "shortDescription": { + "text": "Scanner for REQ.CRED.PUBLIC-ONLY" + } + }, + { + "id": "REQ.SECRET.GENERIC.URL-TEXT", + "properties": { + "conclusion": "private", + "applicability": "undetermined", + "scanner_id": null + }, + "fullDescription": { + "text": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. 
inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n", + "markdown": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n" + }, + "shortDescription": { + "text": "Scanner for REQ.SECRET.GENERIC.URL-TEXT" + } + } + ], + "version": "1.0", + "informationUri": "https://jfrog.com/help/r/jfrog-security-documentation/jfrog-advanced-security" + } + }, + "invocations": [ + { + "arguments": [ + "/Users/assafa/.jfrog/dependencies/analyzerManager/jas_scanner/jas_scanner", + "scan", + "/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638624-1289062780/Secrets_1747638640/config.yaml" + ], + "executionSuccessful": true, + "workingDirectory": { + "uri": "file:///Users/assafa/.jfrog/dependencies/analyzerManager" + } + } + ], + "results": [ + { + "message": { + "text": "Hardcoded secrets were found" + }, + "level": "error", + "locations": [ + { + "physicalLocation": { + "region": { + "snippet": { + "text": "password: jnvkjcxnjvxnvk22222" + }, + "endColumn": 30, + "endLine": 1, + "startColumn": 1, + "startLine": 1 + }, + "artifactLocation": { + "uri": "file:///private/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638503-538392025/TOKENS" + } + } + } + ], + "properties": { + "tokenValidation": "", + "metadata": "" + }, + "suppressions": [], + "partialFingerprints": { + "jfrogSecret": "085de62ad4aa0dc22cf7d733811687c08b5517c4414326723e05e75a822ee58d" + }, + "ruleId": "REQ.SECRET.GENERIC.TEXT" + } + ] + } + ], + "version": "2.1.0", + "$schema": "https://docs.oasis-open.org/sarif/sarif/v2.1.0/cos02/schemas/sarif-schema-2.1.0.json" +} \ No newline at end of file diff --git a/tests/testdata/other/diff-scan/target.sarif b/tests/testdata/other/diff-scan/target.sarif new file mode 100644 index 000000000..4356cd940 --- /dev/null +++ b/tests/testdata/other/diff-scan/target.sarif @@ -0,0 +1 @@ +{"version":"2.1.0","$schema":"https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json","runs":[{"tool":{"driver":{"informationUri":"https://jfrog.com/help/r/jfrog-security-documentation/jfrog-advanced-security","name":"JFrog Secrets scanner","rules":[{"id":"REQ.SECRET.GENERIC.TEXT","shortDescription":{"text":"Scanner for REQ.SECRET.GENERIC.TEXT"},"fullDescription":{"text":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. 
For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n","markdown":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n"},"properties":{"applicability":"applicable","conclusion":"negative","scanner_id":null,"security-severity":"8.9"}},{"id":"REQ.SECRET.GENERIC.CODE","shortDescription":{"text":"Scanner for REQ.SECRET.GENERIC.CODE"},"fullDescription":{"text":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n","markdown":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n"},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":null}},{"id":"REQ.SECRET.KEYS","shortDescription":{"text":"Scanner for REQ.SECRET.KEYS"},"fullDescription":{"text":"\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n","markdown":"\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. 
For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n"},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":"1235","undetermined_reason":""}},{"id":"REQ.CRED.PUBLIC-ONLY","shortDescription":{"text":"Scanner for REQ.CRED.PUBLIC-ONLY"},"fullDescription":{"text":"","markdown":""},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":"125","undetermined_reason":""}},{"id":"REQ.SECRET.GENERIC.URL-TEXT","shortDescription":{"text":"Scanner for REQ.SECRET.GENERIC.URL-TEXT"},"fullDescription":{"text":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. 
inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n","markdown":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n"},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":null}}],"version":"1.0"}},"invocations":[{"arguments":["/Users/assafa/.jfrog/dependencies/analyzerManager/jas_scanner/jas_scanner","scan","/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638540-1087327516/Secrets_1747638541/config.yaml"],"executionSuccessful":true,"workingDirectory":{"uri":"file:////var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638503-4092437536"}}],"results":[{"properties":{"metadata":"","tokenValidation":""},"ruleId":"REQ.SECRET.GENERIC.TEXT","level":"error","message":{"text":"Hardcoded secrets were found"},"locations":[{"physicalLocation":{"artifactLocation":{"uri":"file:///private/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638503-4092437536/TOKENS"},"region":{"startLine":1,"startColumn":1,"endLine":1,"endColumn":30,"snippet":{"text":"password: jnvkjcxnjvxnvk22222"}}}}],"partialFingerprints":{"jfrogSecret":"085de62ad4aa0dc22cf7d733811687c08b5517c4414326723e05e75a822ee58d"}}]}]} \ No newline at end of file diff --git a/utils/results/diff_test.go b/utils/results/diff_test.go new file mode 100644 index 000000000..b43425e25 --- /dev/null +++ b/utils/results/diff_test.go @@ -0,0 +1,762 @@ +package results + +import ( + "os" + "path/filepath" + "testing" + + "github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func strPtr(s string) *string { + return &s +} + +func intPtr(i int) *int { + return &i +} + +func TestFilterSarifRuns_LocationBased(t *testing.T) { + testCases := []struct { + name string + targetRuns []*sarif.Run + sourceRuns []*sarif.Run + expectedCount int + expectedFiles []string + }{ + { + name: "new issues in source - empty target", + targetRuns: []*sarif.Run{{Results: []*sarif.Result{}}}, + sourceRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + }}, + }, + }, + }, + }, + }, + expectedCount: 1, + expectedFiles: []string{"file1.js"}, + }, + { + name: "source has no new issues - same file exists in target", + targetRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + }}, + }, + }, + }, + }, + }, + sourceRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: 
strPtr("file1.js")}, + }}, + }, + }, + }, + }, + }, + expectedCount: 0, + expectedFiles: []string{}, + }, + { + name: "multiple issues - partial match", + targetRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + }}, + }, + }, + }, + }, + }, + sourceRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file2.js")}, + }}, + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + }}, + }, + }, + }, + }, + }, + expectedCount: 1, + expectedFiles: []string{"file2.js"}, + }, + { + name: "issue removed in source", + targetRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + }}, + }, + }, + }, + }, + }, + sourceRuns: []*sarif.Run{{Results: []*sarif.Result{}}}, + expectedCount: 0, + expectedFiles: []string{}, + }, + { + name: "empty source and target", + targetRuns: []*sarif.Run{{Results: []*sarif.Result{}}}, + sourceRuns: []*sarif.Run{{Results: []*sarif.Result{}}}, + expectedCount: 0, + expectedFiles: []string{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Build target keys from target runs + targetKeys := make(map[string]bool) + for _, run := range tc.targetRuns { + extractLocationsOnly(run, targetKeys) + } + + // Filter source runs + filteredRuns := filterSarifRuns(tc.sourceRuns, targetKeys) + + // Count results + resultCount := countSarifResults(filteredRuns) + assert.Equal(t, tc.expectedCount, resultCount) + + // Verify expected files + var foundFiles []string + for _, run := range filteredRuns { + for _, result := range run.Results { + for _, loc := range result.Locations { + if loc.PhysicalLocation != nil && loc.PhysicalLocation.ArtifactLocation != nil && loc.PhysicalLocation.ArtifactLocation.URI != nil { + foundFiles = append(foundFiles, *loc.PhysicalLocation.ArtifactLocation.URI) + } + } + } + } + assert.ElementsMatch(t, tc.expectedFiles, foundFiles) + }) + } +} + +func TestFilterSarifRuns_FingerprintBased(t *testing.T) { + testCases := []struct { + name string + targetRuns []*sarif.Run + sourceRuns []*sarif.Run + expectedCount int + }{ + { + name: "new issue with fingerprint - empty target", + targetRuns: []*sarif.Run{{Results: []*sarif.Result{{}}}}, + sourceRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Fingerprints: map[string]string{ + "precise_sink_and_sink_function": "fingerprint2", + }, + }, + }, + }, + }, + expectedCount: 1, + }, + { + name: "no new issues - same fingerprint exists", + targetRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Fingerprints: map[string]string{ + "precise_sink_and_sink_function": "fingerprint1", + }, + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + }}, + }, + }, + }, + }, + }, + sourceRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Fingerprints: map[string]string{ + "precise_sink_and_sink_function": "fingerprint1", + }, + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file2.js")}, + }}, + }, + }, + }, + 
}, + }, + expectedCount: 0, + }, + { + name: "issue removed - fingerprint based", + targetRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Fingerprints: map[string]string{ + "precise_sink_and_sink_function": "fingerprint2", + }, + }, + }, + }, + }, + sourceRuns: []*sarif.Run{{Results: []*sarif.Result{}}}, + expectedCount: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Build target keys from target runs using fingerprints + targetKeys := make(map[string]bool) + for _, run := range tc.targetRuns { + extractFingerprints(run, targetKeys) + } + + // Filter source runs + filteredRuns := filterSarifRuns(tc.sourceRuns, targetKeys) + + // Count results + resultCount := countSarifResults(filteredRuns) + assert.Equal(t, tc.expectedCount, resultCount) + }) + } +} + +func TestFilterSarifRuns_WithSnippets(t *testing.T) { + testCases := []struct { + name string + targetRuns []*sarif.Run + sourceRuns []*sarif.Run + expectedCount int + }{ + { + name: "same file different snippet - should be new", + targetRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + Region: &sarif.Region{ + Snippet: &sarif.ArtifactContent{Text: strPtr("password = 'secret1'")}, + }, + }}, + }, + }, + }, + }, + }, + sourceRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + Region: &sarif.Region{ + Snippet: &sarif.ArtifactContent{Text: strPtr("password = 'secret2'")}, + }, + }}, + }, + }, + }, + }, + }, + expectedCount: 1, + }, + { + name: "same file same snippet - should be filtered", + targetRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + Region: &sarif.Region{ + Snippet: &sarif.ArtifactContent{Text: strPtr("password = 'secret1'")}, + }, + }}, + }, + }, + }, + }, + }, + sourceRuns: []*sarif.Run{ + { + Results: []*sarif.Result{ + { + Locations: []*sarif.Location{ + {PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("file1.js")}, + Region: &sarif.Region{ + Snippet: &sarif.ArtifactContent{Text: strPtr("password = 'secret1'")}, + }, + }}, + }, + }, + }, + }, + }, + expectedCount: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + targetKeys := make(map[string]bool) + for _, run := range tc.targetRuns { + extractLocationsOnly(run, targetKeys) + } + + filteredRuns := filterSarifRuns(tc.sourceRuns, targetKeys) + resultCount := countSarifResults(filteredRuns) + assert.Equal(t, tc.expectedCount, resultCount) + }) + } +} + +func TestExtractRelativePath(t *testing.T) { + testCases := []struct { + name string + resultPath string + projectRoot string + expected string + }{ + { + name: "simple relative path", + resultPath: "/tmp/project/src/file.js", + projectRoot: "/tmp/project", + expected: "src/file.js", + }, + { + name: "file prefix removal", + resultPath: "file:///tmp/project/src/file.js", + projectRoot: "/tmp/project", + expected: "src/file.js", + }, + { + name: "private prefix removal (macOS)", + resultPath: "file:///private/var/folders/tmp/project/src/file.js", + projectRoot: "/var/folders/tmp/project", + expected: "src/file.js", + }, + { + name: 
"empty project root", + resultPath: "src/file.js", + projectRoot: "", + expected: "src/file.js", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := extractRelativePath(tc.resultPath, tc.projectRoot) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestGetResultFingerprint(t *testing.T) { + testCases := []struct { + name string + result *sarif.Result + expected string + }{ + { + name: "has fingerprint", + result: &sarif.Result{ + Fingerprints: map[string]string{ + "precise_sink_and_sink_function": "test-fingerprint-123", + }, + }, + expected: "test-fingerprint-123", + }, + { + name: "no fingerprint key", + result: &sarif.Result{ + Fingerprints: map[string]string{ + "other_key": "some-value", + }, + }, + expected: "", + }, + { + name: "nil fingerprints", + result: &sarif.Result{}, + expected: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := getResultFingerprint(tc.result) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestGetLocationSnippetText(t *testing.T) { + testCases := []struct { + name string + location *sarif.Location + expected string + }{ + { + name: "has snippet", + location: &sarif.Location{ + PhysicalLocation: &sarif.PhysicalLocation{ + Region: &sarif.Region{ + Snippet: &sarif.ArtifactContent{Text: strPtr("password = 'secret'")}, + }, + }, + }, + expected: "password = 'secret'", + }, + { + name: "no snippet", + location: &sarif.Location{ + PhysicalLocation: &sarif.PhysicalLocation{ + Region: &sarif.Region{}, + }, + }, + expected: "", + }, + { + name: "nil region", + location: &sarif.Location{ + PhysicalLocation: &sarif.PhysicalLocation{}, + }, + expected: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := getLocationSnippetText(tc.location) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestGetLocationFileName(t *testing.T) { + testCases := []struct { + name string + location *sarif.Location + expected string + }{ + { + name: "has file name", + location: &sarif.Location{ + PhysicalLocation: &sarif.PhysicalLocation{ + ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("src/main.js")}, + }, + }, + expected: "src/main.js", + }, + { + name: "nil artifact location", + location: &sarif.Location{ + PhysicalLocation: &sarif.PhysicalLocation{}, + }, + expected: "", + }, + { + name: "nil location", + location: nil, + expected: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := getLocationFileName(tc.location) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestGetInvocationWorkingDirectory(t *testing.T) { + testCases := []struct { + name string + invocation *sarif.Invocation + expected string + }{ + { + name: "has working directory", + invocation: &sarif.Invocation{ + WorkingDirectory: &sarif.ArtifactLocation{URI: strPtr("/tmp/project")}, + }, + expected: "/tmp/project", + }, + { + name: "nil working directory", + invocation: &sarif.Invocation{ + WorkingDirectory: nil, + }, + expected: "", + }, + { + name: "nil invocation", + invocation: nil, + expected: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := getInvocationWorkingDirectory(tc.invocation) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestMergeStatusCodes(t *testing.T) { + testCases := []struct { + name string + target ResultsStatus + source ResultsStatus + expected ResultsStatus + }{ + { + name: "both nil", + target: ResultsStatus{}, + 
source: ResultsStatus{}, + expected: ResultsStatus{}, + }, + { + name: "target has error, source nil", + target: ResultsStatus{ScaScanStatusCode: intPtr(1)}, + source: ResultsStatus{}, + expected: ResultsStatus{ + ScaScanStatusCode: intPtr(1), + }, + }, + { + name: "source has error, target nil", + target: ResultsStatus{}, + source: ResultsStatus{ScaScanStatusCode: intPtr(1)}, + expected: ResultsStatus{ + ScaScanStatusCode: intPtr(1), + }, + }, + { + name: "target has error, source zero", + target: ResultsStatus{ScaScanStatusCode: intPtr(1)}, + source: ResultsStatus{ScaScanStatusCode: intPtr(0)}, + expected: ResultsStatus{ + ScaScanStatusCode: intPtr(1), + }, + }, + { + name: "target zero, source has error", + target: ResultsStatus{ScaScanStatusCode: intPtr(0)}, + source: ResultsStatus{ScaScanStatusCode: intPtr(1)}, + expected: ResultsStatus{ + ScaScanStatusCode: intPtr(1), + }, + }, + { + name: "multiple fields", + target: ResultsStatus{ + ScaScanStatusCode: intPtr(1), + SecretsScanStatusCode: intPtr(0), + }, + source: ResultsStatus{ + ScaScanStatusCode: intPtr(0), + SecretsScanStatusCode: intPtr(2), + IacScanStatusCode: intPtr(3), + }, + expected: ResultsStatus{ + ScaScanStatusCode: intPtr(1), + SecretsScanStatusCode: intPtr(2), + IacScanStatusCode: intPtr(3), + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := MergeStatusCodes(tc.target, tc.source) + + assertStatusCodeEqual(t, tc.expected.ScaScanStatusCode, result.ScaScanStatusCode, "ScaScanStatusCode") + assertStatusCodeEqual(t, tc.expected.SecretsScanStatusCode, result.SecretsScanStatusCode, "SecretsScanStatusCode") + assertStatusCodeEqual(t, tc.expected.IacScanStatusCode, result.IacScanStatusCode, "IacScanStatusCode") + assertStatusCodeEqual(t, tc.expected.SastScanStatusCode, result.SastScanStatusCode, "SastScanStatusCode") + }) + } +} + +func assertStatusCodeEqual(t *testing.T, expected, actual *int, field string) { + if expected == nil { + assert.Nil(t, actual, field+" should be nil") + } else { + assert.NotNil(t, actual, field+" should not be nil") + if actual != nil { + assert.Equal(t, *expected, *actual, field) + } + } +} + +func TestCountSarifResults(t *testing.T) { + testCases := []struct { + name string + runs []*sarif.Run + expected int + }{ + { + name: "nil runs", + runs: nil, + expected: 0, + }, + { + name: "empty runs", + runs: []*sarif.Run{}, + expected: 0, + }, + { + name: "single run with results", + runs: []*sarif.Run{ + {Results: []*sarif.Result{{}, {}, {}}}, + }, + expected: 3, + }, + { + name: "multiple runs", + runs: []*sarif.Run{ + {Results: []*sarif.Result{{}, {}}}, + {Results: []*sarif.Result{{}}}, + }, + expected: 3, + }, + { + name: "run with nil", + runs: []*sarif.Run{ + nil, + {Results: []*sarif.Result{{}}}, + }, + expected: 1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := countSarifResults(tc.runs) + assert.Equal(t, tc.expected, result) + }) + } +} + +// Integration test using real SARIF files from analyzer-manager. +// Note: The test files have different working directories (temp folders), +// so without normalizing paths the diff will show 1 "new" finding. +// This test verifies the SARIF parsing and filtering logic works correctly. 
+func TestFilterSarifRuns_RealSecretsData(t *testing.T) { + testDataDir := filepath.Join("..", "..", "tests", "testdata", "other", "diff-scan") + + targetSarifBytes, err := os.ReadFile(filepath.Join(testDataDir, "target.sarif")) + require.NoError(t, err, "Failed to read target.sarif") + + sourceSarifBytes, err := os.ReadFile(filepath.Join(testDataDir, "results.sarif")) + require.NoError(t, err, "Failed to read results.sarif (source)") + + targetReport, err := sarif.FromBytes(targetSarifBytes) + require.NoError(t, err, "Failed to parse target SARIF") + + sourceReport, err := sarif.FromBytes(sourceSarifBytes) + require.NoError(t, err, "Failed to parse source SARIF") + + require.NotEmpty(t, targetReport.Runs, "Target should have runs") + require.NotEmpty(t, sourceReport.Runs, "Source should have runs") + + // Verify both files contain the same secret content (snippet) + targetSnippet := getLocationSnippetText(targetReport.Runs[0].Results[0].Locations[0]) + sourceSnippet := getLocationSnippetText(sourceReport.Runs[0].Results[0].Locations[0]) + assert.Equal(t, targetSnippet, sourceSnippet, "Both files should have the same secret snippet") + assert.Equal(t, "password: jnvkjcxnjvxnvk22222", targetSnippet) + + // Build target keys using filename+snippet (this matches same secrets even with different paths) + targetKeys := make(map[string]bool) + for _, run := range targetReport.Runs { + for _, result := range run.Results { + for _, location := range result.Locations { + // Use just filename (last path component) + snippet for matching + fileName := getLocationFileName(location) + if fileName != "" { + fileName = filepath.Base(fileName) + } + key := fileName + getLocationSnippetText(location) + targetKeys[key] = true + } + } + } + + // Filter source using same key generation + var filteredResults []*sarif.Result + for _, run := range sourceReport.Runs { + for _, result := range run.Results { + var filteredLocations []*sarif.Location + for _, location := range result.Locations { + fileName := getLocationFileName(location) + if fileName != "" { + fileName = filepath.Base(fileName) + } + key := fileName + getLocationSnippetText(location) + if !targetKeys[key] { + filteredLocations = append(filteredLocations, location) + } + } + if len(filteredLocations) > 0 { + newResult := *result + newResult.Locations = filteredLocations + filteredResults = append(filteredResults, &newResult) + } + } + } + + // Same file (TOKENS) with same snippet should result in 0 new findings + assert.Equal(t, 0, len(filteredResults), "Same secrets should be filtered out") +} From a5897a9b1e68bec751d5b9615117fd34e6767abd Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Wed, 14 Jan 2026 10:17:26 +0200 Subject: [PATCH 14/18] Remove unused MergeStatusCodes function and tests --- utils/results/diff.go | 26 ----------- utils/results/diff_test.go | 91 -------------------------------------- 2 files changed, 117 deletions(-) diff --git a/utils/results/diff.go b/utils/results/diff.go index 7f735d0e4..991fab048 100644 --- a/utils/results/diff.go +++ b/utils/results/diff.go @@ -261,32 +261,6 @@ func extractRelativePath(resultPath string, projectRoot string) string { return strings.TrimPrefix(trimSlash, "/") } -// MergeStatusCodes merges two ResultsStatus, taking the worst (non-zero) status for each scanner. 
-func MergeStatusCodes(target, source ResultsStatus) ResultsStatus { - merged := ResultsStatus{} - merged.SbomScanStatusCode = mergeStatusCode(target.SbomScanStatusCode, source.SbomScanStatusCode) - merged.ScaScanStatusCode = mergeStatusCode(target.ScaScanStatusCode, source.ScaScanStatusCode) - merged.ContextualAnalysisStatusCode = mergeStatusCode(target.ContextualAnalysisStatusCode, source.ContextualAnalysisStatusCode) - merged.SecretsScanStatusCode = mergeStatusCode(target.SecretsScanStatusCode, source.SecretsScanStatusCode) - merged.IacScanStatusCode = mergeStatusCode(target.IacScanStatusCode, source.IacScanStatusCode) - merged.SastScanStatusCode = mergeStatusCode(target.SastScanStatusCode, source.SastScanStatusCode) - merged.ViolationsStatusCode = mergeStatusCode(target.ViolationsStatusCode, source.ViolationsStatusCode) - return merged -} - -func mergeStatusCode(a, b *int) *int { - if a == nil { - return b - } - if b == nil { - return a - } - if *a != 0 { - return a - } - return b -} - func filterSarifRuns(sourceRuns []*sarif.Run, targetKeys map[string]bool) []*sarif.Run { var filteredRuns []*sarif.Run diff --git a/utils/results/diff_test.go b/utils/results/diff_test.go index b43425e25..6a6143259 100644 --- a/utils/results/diff_test.go +++ b/utils/results/diff_test.go @@ -14,10 +14,6 @@ func strPtr(s string) *string { return &s } -func intPtr(i int) *int { - return &i -} - func TestFilterSarifRuns_LocationBased(t *testing.T) { testCases := []struct { name string @@ -554,93 +550,6 @@ func TestGetInvocationWorkingDirectory(t *testing.T) { } } -func TestMergeStatusCodes(t *testing.T) { - testCases := []struct { - name string - target ResultsStatus - source ResultsStatus - expected ResultsStatus - }{ - { - name: "both nil", - target: ResultsStatus{}, - source: ResultsStatus{}, - expected: ResultsStatus{}, - }, - { - name: "target has error, source nil", - target: ResultsStatus{ScaScanStatusCode: intPtr(1)}, - source: ResultsStatus{}, - expected: ResultsStatus{ - ScaScanStatusCode: intPtr(1), - }, - }, - { - name: "source has error, target nil", - target: ResultsStatus{}, - source: ResultsStatus{ScaScanStatusCode: intPtr(1)}, - expected: ResultsStatus{ - ScaScanStatusCode: intPtr(1), - }, - }, - { - name: "target has error, source zero", - target: ResultsStatus{ScaScanStatusCode: intPtr(1)}, - source: ResultsStatus{ScaScanStatusCode: intPtr(0)}, - expected: ResultsStatus{ - ScaScanStatusCode: intPtr(1), - }, - }, - { - name: "target zero, source has error", - target: ResultsStatus{ScaScanStatusCode: intPtr(0)}, - source: ResultsStatus{ScaScanStatusCode: intPtr(1)}, - expected: ResultsStatus{ - ScaScanStatusCode: intPtr(1), - }, - }, - { - name: "multiple fields", - target: ResultsStatus{ - ScaScanStatusCode: intPtr(1), - SecretsScanStatusCode: intPtr(0), - }, - source: ResultsStatus{ - ScaScanStatusCode: intPtr(0), - SecretsScanStatusCode: intPtr(2), - IacScanStatusCode: intPtr(3), - }, - expected: ResultsStatus{ - ScaScanStatusCode: intPtr(1), - SecretsScanStatusCode: intPtr(2), - IacScanStatusCode: intPtr(3), - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := MergeStatusCodes(tc.target, tc.source) - - assertStatusCodeEqual(t, tc.expected.ScaScanStatusCode, result.ScaScanStatusCode, "ScaScanStatusCode") - assertStatusCodeEqual(t, tc.expected.SecretsScanStatusCode, result.SecretsScanStatusCode, "SecretsScanStatusCode") - assertStatusCodeEqual(t, tc.expected.IacScanStatusCode, result.IacScanStatusCode, "IacScanStatusCode") - 
assertStatusCodeEqual(t, tc.expected.SastScanStatusCode, result.SastScanStatusCode, "SastScanStatusCode") - }) - } -} - -func assertStatusCodeEqual(t *testing.T, expected, actual *int, field string) { - if expected == nil { - assert.Nil(t, actual, field+" should be nil") - } else { - assert.NotNil(t, actual, field+" should not be nil") - if actual != nil { - assert.Equal(t, *expected, *actual, field) - } - } -} - func TestCountSarifResults(t *testing.T) { testCases := []struct { name string From 22f7713849ad9995fee9a824017e841c3b2b4467 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Wed, 14 Jan 2026 10:39:23 +0200 Subject: [PATCH 15/18] Fix UnifyScaAndJasResults to preserve Applicability from SCA scan --- utils/results/diff.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/utils/results/diff.go b/utils/results/diff.go index 991fab048..a6fd56aca 100644 --- a/utils/results/diff.go +++ b/utils/results/diff.go @@ -37,11 +37,20 @@ func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) * ScanTarget: scaTarget.ScanTarget, AppsConfigModule: scaTarget.AppsConfigModule, ScaResults: scaTarget.ScaResults, - JasResults: nil, + JasResults: scaTarget.JasResults, } - if jasTarget != nil { - unifiedTarget.JasResults = jasTarget.JasResults + if jasTarget != nil && jasTarget.JasResults != nil { + if unifiedTarget.JasResults == nil { + unifiedTarget.JasResults = jasTarget.JasResults + } else { + unifiedTarget.JasResults.JasVulnerabilities.SecretsScanResults = jasTarget.JasResults.JasVulnerabilities.SecretsScanResults + unifiedTarget.JasResults.JasVulnerabilities.IacScanResults = jasTarget.JasResults.JasVulnerabilities.IacScanResults + unifiedTarget.JasResults.JasVulnerabilities.SastScanResults = jasTarget.JasResults.JasVulnerabilities.SastScanResults + unifiedTarget.JasResults.JasViolations.SecretsScanResults = jasTarget.JasResults.JasViolations.SecretsScanResults + unifiedTarget.JasResults.JasViolations.IacScanResults = jasTarget.JasResults.JasViolations.IacScanResults + unifiedTarget.JasResults.JasViolations.SastScanResults = jasTarget.JasResults.JasViolations.SastScanResults + } } unifiedResults.Targets = append(unifiedResults.Targets, unifiedTarget) From 56609db7f324a3e79b686064890886432d41c784 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Wed, 14 Jan 2026 12:30:08 +0200 Subject: [PATCH 16/18] Fix: Copy GitContext in UnifyScaAndJasResults for proper upload paths --- utils/results/diff.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/utils/results/diff.go b/utils/results/diff.go index a6fd56aca..0594be3f3 100644 --- a/utils/results/diff.go +++ b/utils/results/diff.go @@ -11,6 +11,11 @@ import ( // UnifyScaAndJasResults merges SCA and JAS diff results into a single SecurityCommandResults. 
func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) *SecurityCommandResults { + gitContext := scaResults.GitContext + if gitContext == nil { + gitContext = jasDiffResults.GitContext + } + unifiedResults := &SecurityCommandResults{ ResultsMetaData: ResultsMetaData{ EntitledForJas: jasDiffResults.EntitledForJas, @@ -21,6 +26,7 @@ func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) * MultiScanId: jasDiffResults.MultiScanId, StartTime: jasDiffResults.StartTime, ResultContext: jasDiffResults.ResultContext, + GitContext: gitContext, }, } From 816605dfd51c3981aea3a5060d3ac273322d7c04 Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Thu, 15 Jan 2026 11:48:23 +0200 Subject: [PATCH 17/18] CR fixes - update go.mod replace directive to use git commit --- go.mod | 4 ++-- go.sum | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index c5cfc57b2..c796a4875 100644 --- a/go.mod +++ b/go.mod @@ -135,8 +135,6 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect ) -replace github.com/jfrog/jfrog-client-go => /Users/eyalk/Desktop/team-projects/jfrog-client-go - // replace github.com/jfrog/jfrog-cli-core/v2 => github.com/jfrog/jfrog-cli-core/v2 master //replace github.com/jfrog/jfrog-cli-artifactory => github.com/jfrog/jfrog-cli-artifactory main @@ -144,3 +142,5 @@ replace github.com/jfrog/jfrog-client-go => /Users/eyalk/Desktop/team-projects/j // replace github.com/jfrog/build-info-go => github.com/jfrog/build-info-go dev // replace github.com/jfrog/froggit-go => github.com/jfrog/froggit-go master + +replace github.com/jfrog/jfrog-client-go => github.com/eyalk007/jfrog-client-go v0.0.0-20260114112951-67b77f49255f diff --git a/go.sum b/go.sum index 22ba2fb2a..e40004109 100644 --- a/go.sum +++ b/go.sum @@ -70,6 +70,8 @@ github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/eyalk007/jfrog-client-go v0.0.0-20260114112951-67b77f49255f h1:wievyISUpwoYv47Q+SreXShHnwPaNBkcqGjSOJ7hRZk= +github.com/eyalk007/jfrog-client-go v0.0.0-20260114112951-67b77f49255f/go.mod h1:sCE06+GngPoyrGO0c+vmhgMoVSP83UMNiZnIuNPzU8U= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= From 1ca4a0af83b76122c6d3573e72d8b57dfcfd8b2d Mon Sep 17 00:00:00 2001 From: Eyal Kapon Date: Sun, 18 Jan 2026 12:18:48 +0200 Subject: [PATCH 18/18] CR fixes: extract logger helper, use sarifutils, rename functions --- commands/audit/audit.go | 3 + jas/runner/jasrunner.go | 8 +- sca/scan/scascan.go | 8 +- tests/testdata/other/diff-scan/target.sarif | 143 +++++++++++++++- utils/parallel_runner.go | 13 ++ utils/results/diff.go | 125 ++++---------- utils/results/diff_test.go | 180 ++------------------ 7 files changed, 207 insertions(+), 273 deletions(-) diff --git a/commands/audit/audit.go b/commands/audit/audit.go index 7bf747044..be4c34a63 100644 --- a/commands/audit/audit.go +++ b/commands/audit/audit.go @@ -628,9 +628,12 @@ func addJasScansToRunner(auditParallelRunner *utils.SecurityParallelRunner, audi return } auditParallelRunner.JasWg.Add(1) + // Capture current logger (may be a 
BufferedLogger for isolated parallel logging). + // Worker goroutines need this propagated so their logs are captured in the same buffer. currentLogger := log.GetLogger() jasTask := createJasScansTask(auditParallelRunner, scanResults, serverDetails, auditParams, jasScanner) wrappedJasTask := func(threadId int) error { + // Propagate parent's logger to this worker goroutine for isolated log capture log.SetLoggerForGoroutine(currentLogger) defer log.ClearLoggerForGoroutine() return jasTask(threadId) diff --git a/jas/runner/jasrunner.go b/jas/runner/jasrunner.go index aafdc67ab..6ab730f6a 100644 --- a/jas/runner/jasrunner.go +++ b/jas/runner/jasrunner.go @@ -128,12 +128,8 @@ func addJasScanTaskForModuleIfNeeded(params JasRunnerParams, subScan utils.SubSc func addModuleJasScanTask(scanType jasutils.JasScanType, securityParallelRunner *utils.SecurityParallelRunner, task parallel.TaskFunc, scanResults *results.TargetResults, allowSkippingErrors bool) (generalError error) { securityParallelRunner.JasScannersWg.Add(1) - currentLogger := log.GetLogger() - wrappedTask := func(threadId int) error { - log.SetLoggerForGoroutine(currentLogger) - defer log.ClearLoggerForGoroutine() - return task(threadId) - } + // Wrap task to propagate logger to worker goroutines (for isolated parallel logging) + wrappedTask := utils.WrapTaskWithLoggerPropagation(task) if _, addTaskErr := securityParallelRunner.Runner.AddTaskWithError(wrappedTask, func(err error) { _ = scanResults.AddTargetError(fmt.Errorf("failed to run %s scan: %s", scanType, err.Error()), allowSkippingErrors) }); addTaskErr != nil { diff --git a/sca/scan/scascan.go b/sca/scan/scascan.go index a8c8fa544..f203a4980 100644 --- a/sca/scan/scascan.go +++ b/sca/scan/scascan.go @@ -73,13 +73,9 @@ func RunScaScan(strategy SbomScanStrategy, params ScaScanParams) (generalError e // For Audit scans, we run the scan in parallel using the SecurityParallelRunner. 
func runScaScanWithRunner(strategy SbomScanStrategy, params ScaScanParams) (generalError error) { targetResult := params.ScanResults - currentLogger := log.GetLogger() scaTask := createScaScanTaskWithRunner(params.Runner, strategy, params) - wrappedScaTask := func(threadId int) error { - log.SetLoggerForGoroutine(currentLogger) - defer log.ClearLoggerForGoroutine() - return scaTask(threadId) - } + // Wrap task to propagate logger to worker goroutines (for isolated parallel logging) + wrappedScaTask := utils.WrapTaskWithLoggerPropagation(scaTask) // Create sca scan task if _, taskCreationErr := params.Runner.Runner.AddTaskWithError(wrappedScaTask, func(err error) { _ = targetResult.AddTargetError(fmt.Errorf("failed to execute SCA scan: %s", err.Error()), params.AllowPartialResults) diff --git a/tests/testdata/other/diff-scan/target.sarif b/tests/testdata/other/diff-scan/target.sarif index 4356cd940..7da1c3967 100644 --- a/tests/testdata/other/diff-scan/target.sarif +++ b/tests/testdata/other/diff-scan/target.sarif @@ -1 +1,142 @@ -{"version":"2.1.0","$schema":"https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json","runs":[{"tool":{"driver":{"informationUri":"https://jfrog.com/help/r/jfrog-security-documentation/jfrog-advanced-security","name":"JFrog Secrets scanner","rules":[{"id":"REQ.SECRET.GENERIC.TEXT","shortDescription":{"text":"Scanner for REQ.SECRET.GENERIC.TEXT"},"fullDescription":{"text":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n","markdown":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n"},"properties":{"applicability":"applicable","conclusion":"negative","scanner_id":null,"security-severity":"8.9"}},{"id":"REQ.SECRET.GENERIC.CODE","shortDescription":{"text":"Scanner for REQ.SECRET.GENERIC.CODE"},"fullDescription":{"text":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n","markdown":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n"},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":null}},{"id":"REQ.SECRET.KEYS","shortDescription":{"text":"Scanner for REQ.SECRET.KEYS"},"fullDescription":{"text":"\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n","markdown":"\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. 
For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n"},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":"1235","undetermined_reason":""}},{"id":"REQ.CRED.PUBLIC-ONLY","shortDescription":{"text":"Scanner for REQ.CRED.PUBLIC-ONLY"},"fullDescription":{"text":"","markdown":""},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":"125","undetermined_reason":""}},{"id":"REQ.SECRET.GENERIC.URL-TEXT","shortDescription":{"text":"Scanner for REQ.SECRET.GENERIC.URL-TEXT"},"fullDescription":{"text":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. 
inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n","markdown":"Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n"},"properties":{"applicability":"undetermined","conclusion":"private","scanner_id":null}}],"version":"1.0"}},"invocations":[{"arguments":["/Users/assafa/.jfrog/dependencies/analyzerManager/jas_scanner/jas_scanner","scan","/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638540-1087327516/Secrets_1747638541/config.yaml"],"executionSuccessful":true,"workingDirectory":{"uri":"file:////var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638503-4092437536"}}],"results":[{"properties":{"metadata":"","tokenValidation":""},"ruleId":"REQ.SECRET.GENERIC.TEXT","level":"error","message":{"text":"Hardcoded secrets were found"},"locations":[{"physicalLocation":{"artifactLocation":{"uri":"file:///private/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638503-4092437536/TOKENS"},"region":{"startLine":1,"startColumn":1,"endLine":1,"endColumn":30,"snippet":{"text":"password: jnvkjcxnjvxnvk22222"}}}}],"partialFingerprints":{"jfrogSecret":"085de62ad4aa0dc22cf7d733811687c08b5517c4414326723e05e75a822ee58d"}}]}]} \ No newline at end of file +{ + "version": "2.1.0", + "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json", + "runs": [ + { + "tool": { + "driver": { + "informationUri": "https://jfrog.com/help/r/jfrog-security-documentation/jfrog-advanced-security", + "name": "JFrog Secrets scanner", + "rules": [ + { + "id": "REQ.SECRET.GENERIC.TEXT", + "shortDescription": { + "text": "Scanner for REQ.SECRET.GENERIC.TEXT" + }, + "fullDescription": { + "text": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. 
inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n", + "markdown": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n" + }, + "properties": { + "applicability": "applicable", + "conclusion": "negative", + "scanner_id": null, + "security-severity": "8.9" + } + }, + { + "id": "REQ.SECRET.GENERIC.CODE", + "shortDescription": { + "text": "Scanner for REQ.SECRET.GENERIC.CODE" + }, + "fullDescription": { + "text": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n", + "markdown": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n" + }, + "properties": { + "applicability": "undetermined", + "conclusion": "private", + "scanner_id": null + } + }, + { + "id": "REQ.SECRET.KEYS", + "shortDescription": { + "text": "Scanner for REQ.SECRET.KEYS" + }, + "fullDescription": { + "text": "\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n", + "markdown": "\nStoring an API key in the image could lead to several risks.\n\nIf the key is associated with a wide scope of privileges, attackers could extract it from a single image or firmware and use it maliciously to attack many targets. 
For example, if the embedded key allows querying/modifying data for all cloud user accounts, without per-user authentication, the attackers who extract it would gain access to system-wide data.\n\nIf the cloud/SaaS provider bills by key usage - for example, every million queries cost the key's owner a fixed sum of money - attackers could use the keys for their own purposes (or just as a form of vandalism), incurring a large cost to the legitimate user or operator.\n\n## Best practices\n\nUse narrow scopes for stored API keys. As much as possible, API keys should be unique per host and require additional authentication with the user's individual credentials for any sensitive actions.\n\nAvoid placing keys whose use incurs costs directly in the image. Store the key with any software or hardware protection available on the host for key storage (such as operating system key-stores, hardware cryptographic storage mechanisms or cloud-managed secure storage services such as [AWS KMS](https://aws.amazon.com/kms/)).\n\nTokens that were detected as exposed should be revoked and replaced -\n\n* [AWS Key Revocation](https://aws.amazon.com/premiumsupport/knowledge-center/delete-access-key/#:~:text=If%20you%20see%20a%20warning,the%20confirmation%20box%2C%20choose%20Deactivate.)\n* [GCP Key Revocation](https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/delete-api-keys.html)\n* [Azure Key Revocation](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#revoke-a-pat)\n* [GitHub Key Revocation](https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization)\n" + }, + "properties": { + "applicability": "undetermined", + "conclusion": "private", + "scanner_id": "1235", + "undetermined_reason": "" + } + }, + { + "id": "REQ.CRED.PUBLIC-ONLY", + "shortDescription": { + "text": "Scanner for REQ.CRED.PUBLIC-ONLY" + }, + "fullDescription": { + "text": "", + "markdown": "" + }, + "properties": { + "applicability": "undetermined", + "conclusion": "private", + "scanner_id": "125", + "undetermined_reason": "" + } + }, + { + "id": "REQ.SECRET.GENERIC.URL-TEXT", + "shortDescription": { + "text": "Scanner for REQ.SECRET.GENERIC.URL-TEXT" + }, + "fullDescription": { + "text": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. 
inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n", + "markdown": "Storing hardcoded secrets in your source code or binary artifact could lead to several risks.\n\nIf the secret is associated with a wide scope of privileges, attackers could extract it from the source code or binary artifact and use it maliciously to attack many targets. For example, if the hardcoded password gives high-privilege access to an AWS account, the attackers may be able to query/modify company-wide sensitive data without per-user authentication.\n\n## Best practices\n\nUse safe storage when storing high-privilege secrets such as passwords and tokens, for example -\n\n* ### Environment Variables\n\nEnvironment variables are set outside of the application code, and can be dynamically passed to the application only when needed, for example -\n`SECRET_VAR=MySecret ./my_application`\nThis way, `MySecret` does not have to be hardcoded into `my_application`.\n\nNote that if your entire binary artifact is published (ex. a Docker container published to Docker Hub), the value for the environment variable must not be stored in the artifact itself (ex. inside the `Dockerfile` or one of the container's files) but rather must be passed dynamically, for example in the `docker run` call as an argument.\n\n* ### Secret management services\n\nExternal vendors offer cloud-based secret management services, that provide proper access control to each secret. The given access to each secret can be dynamically modified or even revoked. 
Some examples include -\n\n* [Hashicorp Vault](https://www.vaultproject.io)\n* [AWS KMS](https://aws.amazon.com/kms) (Key Management Service)\n* [Google Cloud KMS](https://cloud.google.com/security-key-management)\n\n## Least-privilege principle\n\nStoring a secret in a hardcoded manner can be made safer, by making sure the secret grants the least amount of privilege as needed by the application.\nFor example - if the application needs to read a specific table from a specific database, and the secret grants access to perform this operation **only** (meaning - no access to other tables, no write access at all) then the damage from any secret leaks is mitigated.\nThat being said, it is still not recommended to store secrets in a hardcoded manner, since this type of storage does not offer any way to revoke or moderate the usage of the secret.\n" + }, + "properties": { + "applicability": "undetermined", + "conclusion": "private", + "scanner_id": null + } + } + ], + "version": "1.0" + } + }, + "invocations": [ + { + "arguments": [ + "/Users/assafa/.jfrog/dependencies/analyzerManager/jas_scanner/jas_scanner", + "scan", + "/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638540-1087327516/Secrets_1747638541/config.yaml" + ], + "executionSuccessful": true, + "workingDirectory": { + "uri": "file:////var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638503-4092437536" + } + } + ], + "results": [ + { + "properties": { + "metadata": "", + "tokenValidation": "" + }, + "ruleId": "REQ.SECRET.GENERIC.TEXT", + "level": "error", + "message": { + "text": "Hardcoded secrets were found" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "file:///private/var/folders/xv/th4cksxn7jv9wjrdnn1h4tj00000gq/T/jfrog.cli.temp.-1747638503-4092437536/TOKENS" + }, + "region": { + "startLine": 1, + "startColumn": 1, + "endLine": 1, + "endColumn": 30, + "snippet": { + "text": "password: jnvkjcxnjvxnvk22222" + } + } + } + } + ], + "partialFingerprints": { + "jfrogSecret": "085de62ad4aa0dc22cf7d733811687c08b5517c4414326723e05e75a822ee58d" + } + } + ] + } + ] +} diff --git a/utils/parallel_runner.go b/utils/parallel_runner.go index a8435cb8e..f9970661b 100644 --- a/utils/parallel_runner.go +++ b/utils/parallel_runner.go @@ -2,6 +2,7 @@ package utils import ( "github.com/jfrog/gofrog/parallel" + "github.com/jfrog/jfrog-client-go/utils/log" "sync" ) @@ -15,6 +16,18 @@ type SecurityParallelRunner struct { onScanEndFunc func() } +// WrapTaskWithLoggerPropagation wraps a parallel task to propagate the current goroutine's logger +// to worker goroutines. This is needed when using BufferedLogger for isolated parallel logging - +// worker goroutines need to inherit the parent's logger so their logs are captured in the same buffer. 
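+//
+// A minimal usage sketch (illustrative only; assumes a gofrog parallel runner named
+// "runner" and that AddTask is its task-submission method):
+//
+//	task := func(threadId int) error {
+//		log.Debug("running scan task on thread", threadId)
+//		return nil
+//	}
+//	_, _ = runner.AddTask(WrapTaskWithLoggerPropagation(task))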
+func WrapTaskWithLoggerPropagation(task parallel.TaskFunc) parallel.TaskFunc { + currentLogger := log.GetLogger() + return func(threadId int) error { + log.SetLoggerForGoroutine(currentLogger) + defer log.ClearLoggerForGoroutine() + return task(threadId) + } +} + func NewSecurityParallelRunner(numOfParallelScans int) SecurityParallelRunner { return SecurityParallelRunner{Runner: parallel.NewRunner(numOfParallelScans, 20000, false)} } diff --git a/utils/results/diff.go b/utils/results/diff.go index 0594be3f3..7cc7212e8 100644 --- a/utils/results/diff.go +++ b/utils/results/diff.go @@ -1,33 +1,21 @@ package results import ( - "path/filepath" - "strings" - + "github.com/jfrog/jfrog-cli-security/utils/formats/sarifutils" "github.com/jfrog/jfrog-cli-security/utils/jasutils" "github.com/jfrog/jfrog-client-go/utils/log" "github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif" ) -// UnifyScaAndJasResults merges SCA and JAS diff results into a single SecurityCommandResults. -func UnifyScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) *SecurityCommandResults { - gitContext := scaResults.GitContext - if gitContext == nil { - gitContext = jasDiffResults.GitContext - } - +// MergeScaAndJasResults merges SCA results with JAS diff results into a single SecurityCommandResults. +// SCA results provide the base (including ScaResults and GitContext), JAS results provide the JAS findings. +func MergeScaAndJasResults(scaResults, jasDiffResults *SecurityCommandResults) *SecurityCommandResults { unifiedResults := &SecurityCommandResults{ - ResultsMetaData: ResultsMetaData{ - EntitledForJas: jasDiffResults.EntitledForJas, - SecretValidation: jasDiffResults.SecretValidation, - CmdType: jasDiffResults.CmdType, - XrayVersion: jasDiffResults.XrayVersion, - XscVersion: jasDiffResults.XscVersion, - MultiScanId: jasDiffResults.MultiScanId, - StartTime: jasDiffResults.StartTime, - ResultContext: jasDiffResults.ResultContext, - GitContext: gitContext, - }, + ResultsMetaData: jasDiffResults.ResultsMetaData, + } + // Prefer SCA's GitContext (contains PR upload path info) + if scaResults.GitContext != nil { + unifiedResults.GitContext = scaResults.GitContext } for _, scaTarget := range scaResults.Targets { @@ -72,16 +60,7 @@ func CompareJasResults(targetResults, sourceResults *SecurityCommandResults) *Se log.Debug("[DIFF] Comparing", len(sourceResults.Targets), "source targets against", len(targetResults.Targets), "target targets") diffResults := &SecurityCommandResults{ - ResultsMetaData: ResultsMetaData{ - EntitledForJas: sourceResults.EntitledForJas, - SecretValidation: sourceResults.SecretValidation, - CmdType: sourceResults.CmdType, - XrayVersion: sourceResults.XrayVersion, - XscVersion: sourceResults.XscVersion, - MultiScanId: sourceResults.MultiScanId, - StartTime: sourceResults.StartTime, - ResultContext: sourceResults.ResultContext, - }, + ResultsMetaData: sourceResults.ResultsMetaData, } for _, sourceTarget := range sourceResults.Targets { @@ -146,8 +125,6 @@ func filterExistingFindings(allTargetJasResults []*JasScansResults, sourceJasRes } } - log.Debug("[DIFF] Built target fingerprint set with", len(targetKeys), "unique keys") - sourceSecrets := countSarifResults(sourceJasResults.JasVulnerabilities.SecretsScanResults) + countSarifResults(sourceJasResults.JasViolations.SecretsScanResults) sourceIac := countSarifResults(sourceJasResults.JasVulnerabilities.IacScanResults) + @@ -159,18 +136,18 @@ func filterExistingFindings(allTargetJasResults []*JasScansResults, sourceJasRes filteredJasResults 
:= &JasScansResults{} - filteredJasResults.JasVulnerabilities.SecretsScanResults = filterSarifRuns( + filteredJasResults.JasVulnerabilities.SecretsScanResults = filterNewSarifFindings( sourceJasResults.JasVulnerabilities.SecretsScanResults, targetKeys) - filteredJasResults.JasVulnerabilities.IacScanResults = filterSarifRuns( + filteredJasResults.JasVulnerabilities.IacScanResults = filterNewSarifFindings( sourceJasResults.JasVulnerabilities.IacScanResults, targetKeys) - filteredJasResults.JasVulnerabilities.SastScanResults = filterSarifRuns( + filteredJasResults.JasVulnerabilities.SastScanResults = filterNewSarifFindings( sourceJasResults.JasVulnerabilities.SastScanResults, targetKeys) - filteredJasResults.JasViolations.SecretsScanResults = filterSarifRuns( + filteredJasResults.JasViolations.SecretsScanResults = filterNewSarifFindings( sourceJasResults.JasViolations.SecretsScanResults, targetKeys) - filteredJasResults.JasViolations.IacScanResults = filterSarifRuns( + filteredJasResults.JasViolations.IacScanResults = filterNewSarifFindings( sourceJasResults.JasViolations.IacScanResults, targetKeys) - filteredJasResults.JasViolations.SastScanResults = filterSarifRuns( + filteredJasResults.JasViolations.SastScanResults = filterNewSarifFindings( sourceJasResults.JasViolations.SastScanResults, targetKeys) diffSecrets := countSarifResults(filteredJasResults.JasVulnerabilities.SecretsScanResults) + @@ -198,14 +175,14 @@ func countSarifResults(runs []*sarif.Run) int { func extractFingerprints(run *sarif.Run, targetKeys map[string]bool) { for _, result := range run.Results { - if result.Fingerprints != nil { - key := getResultFingerprint(result) + if sarifutils.IsFingerprintsExists(result) { + key := getSastFingerprint(result) if key != "" { targetKeys[key] = true } } else { for _, location := range result.Locations { - key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + key := sarifutils.GetRelativeLocationFileName(location, run.Invocations) + sarifutils.GetLocationSnippetText(location) targetKeys[key] = true } } @@ -215,13 +192,16 @@ func extractFingerprints(run *sarif.Run, targetKeys map[string]bool) { func extractLocationsOnly(run *sarif.Run, targetKeys map[string]bool) { for _, result := range run.Results { for _, location := range result.Locations { - key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + key := sarifutils.GetRelativeLocationFileName(location, run.Invocations) + sarifutils.GetLocationSnippetText(location) targetKeys[key] = true } } } -func getResultFingerprint(result *sarif.Result) string { +// getSastFingerprint extracts the SAST fingerprint used for diff matching. +// Note: Uses "precise_sink_and_sink_function" key (from Analyzer Manager for diff purposes), +// which differs from jasutils.SastFingerprintKey ("significant_full_path") used elsewhere. 
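+//
+// Illustrative example (hypothetical values): a SAST result whose Fingerprints map
+// contains {"precise_sink_and_sink_function": "a1b2c3"} yields "a1b2c3"; a result with
+// no Fingerprints map, or without that key, yields "".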
+func getSastFingerprint(result *sarif.Result) string { if result.Fingerprints != nil { if value, ok := result.Fingerprints["precise_sink_and_sink_function"]; ok { return value @@ -230,67 +210,24 @@ func getResultFingerprint(result *sarif.Result) string { return "" } -func getLocationSnippetText(location *sarif.Location) string { - if location.PhysicalLocation != nil && location.PhysicalLocation.Region != nil && - location.PhysicalLocation.Region.Snippet != nil && location.PhysicalLocation.Region.Snippet.Text != nil { - return *location.PhysicalLocation.Region.Snippet.Text - } - return "" -} - -func getRelativeLocationFileName(location *sarif.Location, invocations []*sarif.Invocation) string { - wd := "" - if len(invocations) > 0 { - wd = getInvocationWorkingDirectory(invocations[0]) - } - filePath := getLocationFileName(location) - if filePath != "" { - return extractRelativePath(filePath, wd) - } - return "" -} - -func getInvocationWorkingDirectory(invocation *sarif.Invocation) string { - if invocation != nil && invocation.WorkingDirectory != nil && invocation.WorkingDirectory.URI != nil { - return *invocation.WorkingDirectory.URI - } - return "" -} - -func getLocationFileName(location *sarif.Location) string { - if location != nil && location.PhysicalLocation != nil && location.PhysicalLocation.ArtifactLocation != nil && location.PhysicalLocation.ArtifactLocation.URI != nil { - return *location.PhysicalLocation.ArtifactLocation.URI - } - return "" -} - -func extractRelativePath(resultPath string, projectRoot string) string { - resultPath = strings.TrimPrefix(resultPath, "file:///private") - resultPath = strings.TrimPrefix(resultPath, "file:///") - projectRoot = strings.TrimPrefix(projectRoot, "file:///private") - projectRoot = strings.TrimPrefix(projectRoot, "file:///") - projectRoot = strings.TrimPrefix(projectRoot, "/") - - relativePath := strings.ReplaceAll(resultPath, projectRoot, "") - trimSlash := strings.TrimPrefix(relativePath, string(filepath.Separator)) - return strings.TrimPrefix(trimSlash, "/") -} - -func filterSarifRuns(sourceRuns []*sarif.Run, targetKeys map[string]bool) []*sarif.Run { +// filterNewSarifFindings removes findings from sourceRuns that already exist in targetKeys. +// For SAST results with fingerprints, matches by fingerprint. +// For Secrets/IaC results, matches by file location + snippet text. 
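+//
+// Key construction sketch (illustrative path and snippet): a Secrets/IaC finding at
+// relative path "src/config.yaml" with snippet "password: example123" produces the key
+// "src/config.yamlpassword: example123"; the finding is kept only when that key is not
+// present in targetKeys.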
+func filterNewSarifFindings(sourceRuns []*sarif.Run, targetKeys map[string]bool) []*sarif.Run { var filteredRuns []*sarif.Run for _, run := range sourceRuns { var filteredResults []*sarif.Result for _, result := range run.Results { - if result.Fingerprints != nil { - if !targetKeys[getResultFingerprint(result)] { + if sarifutils.IsFingerprintsExists(result) { + if !targetKeys[getSastFingerprint(result)] { filteredResults = append(filteredResults, result) } } else { var filteredLocations []*sarif.Location for _, location := range result.Locations { - key := getRelativeLocationFileName(location, run.Invocations) + getLocationSnippetText(location) + key := sarifutils.GetRelativeLocationFileName(location, run.Invocations) + sarifutils.GetLocationSnippetText(location) if !targetKeys[key] { filteredLocations = append(filteredLocations, location) } diff --git a/utils/results/diff_test.go b/utils/results/diff_test.go index 6a6143259..e0f103de4 100644 --- a/utils/results/diff_test.go +++ b/utils/results/diff_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/jfrog/jfrog-cli-security/utils/formats/sarifutils" "github.com/owenrumney/go-sarif/v3/pkg/report/v210/sarif" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -143,7 +144,7 @@ func TestFilterSarifRuns_LocationBased(t *testing.T) { } // Filter source runs - filteredRuns := filterSarifRuns(tc.sourceRuns, targetKeys) + filteredRuns := filterNewSarifFindings(tc.sourceRuns, targetKeys) // Count results resultCount := countSarifResults(filteredRuns) @@ -251,7 +252,7 @@ func TestFilterSarifRuns_FingerprintBased(t *testing.T) { } // Filter source runs - filteredRuns := filterSarifRuns(tc.sourceRuns, targetKeys) + filteredRuns := filterNewSarifFindings(tc.sourceRuns, targetKeys) // Count results resultCount := countSarifResults(filteredRuns) @@ -348,55 +349,17 @@ func TestFilterSarifRuns_WithSnippets(t *testing.T) { extractLocationsOnly(run, targetKeys) } - filteredRuns := filterSarifRuns(tc.sourceRuns, targetKeys) + filteredRuns := filterNewSarifFindings(tc.sourceRuns, targetKeys) resultCount := countSarifResults(filteredRuns) assert.Equal(t, tc.expectedCount, resultCount) }) } } -func TestExtractRelativePath(t *testing.T) { - testCases := []struct { - name string - resultPath string - projectRoot string - expected string - }{ - { - name: "simple relative path", - resultPath: "/tmp/project/src/file.js", - projectRoot: "/tmp/project", - expected: "src/file.js", - }, - { - name: "file prefix removal", - resultPath: "file:///tmp/project/src/file.js", - projectRoot: "/tmp/project", - expected: "src/file.js", - }, - { - name: "private prefix removal (macOS)", - resultPath: "file:///private/var/folders/tmp/project/src/file.js", - projectRoot: "/var/folders/tmp/project", - expected: "src/file.js", - }, - { - name: "empty project root", - resultPath: "src/file.js", - projectRoot: "", - expected: "src/file.js", - }, - } +// Note: Tests for extractRelativePath, getLocationSnippetText, getLocationFileName, and +// getInvocationWorkingDirectory have been removed as these now use sarifutils functions. 
- for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := extractRelativePath(tc.resultPath, tc.projectRoot) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestGetResultFingerprint(t *testing.T) { +func TestGetSastFingerprint(t *testing.T) { testCases := []struct { name string result *sarif.Result @@ -429,122 +392,7 @@ func TestGetResultFingerprint(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - result := getResultFingerprint(tc.result) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestGetLocationSnippetText(t *testing.T) { - testCases := []struct { - name string - location *sarif.Location - expected string - }{ - { - name: "has snippet", - location: &sarif.Location{ - PhysicalLocation: &sarif.PhysicalLocation{ - Region: &sarif.Region{ - Snippet: &sarif.ArtifactContent{Text: strPtr("password = 'secret'")}, - }, - }, - }, - expected: "password = 'secret'", - }, - { - name: "no snippet", - location: &sarif.Location{ - PhysicalLocation: &sarif.PhysicalLocation{ - Region: &sarif.Region{}, - }, - }, - expected: "", - }, - { - name: "nil region", - location: &sarif.Location{ - PhysicalLocation: &sarif.PhysicalLocation{}, - }, - expected: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := getLocationSnippetText(tc.location) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestGetLocationFileName(t *testing.T) { - testCases := []struct { - name string - location *sarif.Location - expected string - }{ - { - name: "has file name", - location: &sarif.Location{ - PhysicalLocation: &sarif.PhysicalLocation{ - ArtifactLocation: &sarif.ArtifactLocation{URI: strPtr("src/main.js")}, - }, - }, - expected: "src/main.js", - }, - { - name: "nil artifact location", - location: &sarif.Location{ - PhysicalLocation: &sarif.PhysicalLocation{}, - }, - expected: "", - }, - { - name: "nil location", - location: nil, - expected: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := getLocationFileName(tc.location) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestGetInvocationWorkingDirectory(t *testing.T) { - testCases := []struct { - name string - invocation *sarif.Invocation - expected string - }{ - { - name: "has working directory", - invocation: &sarif.Invocation{ - WorkingDirectory: &sarif.ArtifactLocation{URI: strPtr("/tmp/project")}, - }, - expected: "/tmp/project", - }, - { - name: "nil working directory", - invocation: &sarif.Invocation{ - WorkingDirectory: nil, - }, - expected: "", - }, - { - name: "nil invocation", - invocation: nil, - expected: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := getInvocationWorkingDirectory(tc.invocation) + result := getSastFingerprint(tc.result) assert.Equal(t, tc.expected, result) }) } @@ -622,8 +470,8 @@ func TestFilterSarifRuns_RealSecretsData(t *testing.T) { require.NotEmpty(t, sourceReport.Runs, "Source should have runs") // Verify both files contain the same secret content (snippet) - targetSnippet := getLocationSnippetText(targetReport.Runs[0].Results[0].Locations[0]) - sourceSnippet := getLocationSnippetText(sourceReport.Runs[0].Results[0].Locations[0]) + targetSnippet := sarifutils.GetLocationSnippetText(targetReport.Runs[0].Results[0].Locations[0]) + sourceSnippet := sarifutils.GetLocationSnippetText(sourceReport.Runs[0].Results[0].Locations[0]) assert.Equal(t, targetSnippet, sourceSnippet, "Both files should have the 
same secret snippet") assert.Equal(t, "password: jnvkjcxnjvxnvk22222", targetSnippet) @@ -633,11 +481,11 @@ func TestFilterSarifRuns_RealSecretsData(t *testing.T) { for _, result := range run.Results { for _, location := range result.Locations { // Use just filename (last path component) + snippet for matching - fileName := getLocationFileName(location) + fileName := sarifutils.GetLocationFileName(location) if fileName != "" { fileName = filepath.Base(fileName) } - key := fileName + getLocationSnippetText(location) + key := fileName + sarifutils.GetLocationSnippetText(location) targetKeys[key] = true } } @@ -649,11 +497,11 @@ func TestFilterSarifRuns_RealSecretsData(t *testing.T) { for _, result := range run.Results { var filteredLocations []*sarif.Location for _, location := range result.Locations { - fileName := getLocationFileName(location) + fileName := sarifutils.GetLocationFileName(location) if fileName != "" { fileName = filepath.Base(fileName) } - key := fileName + getLocationSnippetText(location) + key := fileName + sarifutils.GetLocationSnippetText(location) if !targetKeys[key] { filteredLocations = append(filteredLocations, location) }