diff --git a/.gitignore b/.gitignore
index 1149c51..c5e5a45 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,3 +32,6 @@ go.work.sum
# .vscode/
build/
+
+# AI encryption key
+.encryption_key
diff --git a/README.md b/README.md
index cde96fc..7882310 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-# PatchMon Agent
+# PatchMonEnhanced Agent
-PatchMon's monitoring agent sends package and repository information to the PatchMon server.
+PatchMonEnhanced's monitoring agent sends package and repository information to the PatchMonEnhanced server.
## Installation
@@ -180,7 +180,7 @@ Logs are written to `/var/log/patchmon-agent.log` with timestamps and structured
```
2023-09-27T10:30:00 level=info msg="Collecting package information..."
2023-09-27T10:30:01 level=info msg="Found packages" count=156
-2023-09-27T10:30:02 level=info msg="Sending report to PatchMon server..."
+2023-09-27T10:30:02 level=info msg="Sending report to PatchMonEnhanced server..."
2023-09-27T10:30:03 level=info msg="Report sent successfully"
```
@@ -232,7 +232,7 @@ The Go implementation maintains compatibility with the existing shell script wor
1. **Same command structure**: All commands work identically
2. **Same configuration files**: Uses the same paths and formats
-3. **Same API compatibility**: Works with existing PatchMon servers
+3. **Same API compatibility**: Works with existing PatchMonEnhanced servers
4. **Improved performance**: Faster execution and better error handling
To migrate:
diff --git a/cmd/patchmon-agent/commands/report.go b/cmd/patchmon-agent/commands/report.go
index 65453cc..adf6353 100644
--- a/cmd/patchmon-agent/commands/report.go
+++ b/cmd/patchmon-agent/commands/report.go
@@ -5,11 +5,13 @@ import (
"encoding/json"
"fmt"
"os"
+ "sync"
"time"
"patchmon-agent/internal/client"
"patchmon-agent/internal/hardware"
"patchmon-agent/internal/integrations"
+ "patchmon-agent/internal/integrations/compliance"
"patchmon-agent/internal/integrations/docker"
"patchmon-agent/internal/network"
"patchmon-agent/internal/packages"
@@ -102,10 +104,10 @@ func sendReport(outputJson bool) error {
needsReboot, rebootReason := systemDetector.CheckRebootRequired()
installedKernel := systemDetector.GetLatestInstalledKernel()
logger.WithFields(logrus.Fields{
- "needs_reboot": needsReboot,
- "reason": rebootReason,
- "installed_kernel": installedKernel,
- "running_kernel": systemInfo.KernelVersion,
+ "needs_reboot": needsReboot,
+ "reason": rebootReason,
+ "installed_kernel": installedKernel,
+ "running_kernel": systemInfo.KernelVersion,
}).Info("Reboot status check completed")
// Get package information
@@ -172,31 +174,31 @@ func sendReport(outputJson bool) error {
// Create payload
payload := &models.ReportPayload{
- Packages: packageList,
- Repositories: repoList,
- OSType: osType,
- OSVersion: osVersion,
- Hostname: hostname,
- IP: ipAddress,
- Architecture: architecture,
- AgentVersion: version.Version,
- MachineID: systemDetector.GetMachineID(),
- KernelVersion: systemInfo.KernelVersion,
+ Packages: packageList,
+ Repositories: repoList,
+ OSType: osType,
+ OSVersion: osVersion,
+ Hostname: hostname,
+ IP: ipAddress,
+ Architecture: architecture,
+ AgentVersion: version.Version,
+ MachineID: systemDetector.GetMachineID(),
+ KernelVersion: systemInfo.KernelVersion,
InstalledKernelVersion: installedKernel,
- SELinuxStatus: systemInfo.SELinuxStatus,
- SystemUptime: systemInfo.SystemUptime,
- LoadAverage: systemInfo.LoadAverage,
- CPUModel: hardwareInfo.CPUModel,
- CPUCores: hardwareInfo.CPUCores,
- RAMInstalled: hardwareInfo.RAMInstalled,
- SwapSize: hardwareInfo.SwapSize,
- DiskDetails: hardwareInfo.DiskDetails,
- GatewayIP: networkInfo.GatewayIP,
- DNSServers: networkInfo.DNSServers,
- NetworkInterfaces: networkInfo.NetworkInterfaces,
- ExecutionTime: executionTime,
- NeedsReboot: needsReboot,
- RebootReason: rebootReason,
+ SELinuxStatus: systemInfo.SELinuxStatus,
+ SystemUptime: systemInfo.SystemUptime,
+ LoadAverage: systemInfo.LoadAverage,
+ CPUModel: hardwareInfo.CPUModel,
+ CPUCores: hardwareInfo.CPUCores,
+ RAMInstalled: hardwareInfo.RAMInstalled,
+ SwapSize: hardwareInfo.SwapSize,
+ DiskDetails: hardwareInfo.DiskDetails,
+ GatewayIP: networkInfo.GatewayIP,
+ DNSServers: networkInfo.DNSServers,
+ NetworkInterfaces: networkInfo.NetworkInterfaces,
+ ExecutionTime: executionTime,
+ NeedsReboot: needsReboot,
+ RebootReason: rebootReason,
}
// If --report-json flag is set, output JSON and exit
@@ -241,13 +243,27 @@ func sendReport(outputJson bool) error {
return nil
}
} else {
- // Proactive update check after report (non-blocking with timeout)
- // Run in a goroutine to avoid blocking the report completion
+ // Proactive update check after the report. Unlike the previous non-blocking design,
+ // we now use a WaitGroup so the goroutine completes before the function returns.
+ var wg sync.WaitGroup
+ wg.Add(1)
go func() {
+ defer wg.Done()
+
+ // NOTE: this 2-minute context currently only bounds the initial delay select below;
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer cancel()
+
// Add a delay to prevent immediate checks after service restart
// This gives the new process time to fully initialize
- time.Sleep(5 * time.Second)
-
+ select {
+ case <-time.After(5 * time.Second):
+ // Continue with update check
+ case <-ctx.Done():
+ logger.Debug("Update check cancelled due to timeout")
+ return
+ }
+
logger.Info("Checking for agent updates...")
versionInfo, err := getServerVersionInfo()
if err != nil {
@@ -277,6 +293,8 @@ func sendReport(outputJson bool) error {
logger.WithField("version", versionInfo.CurrentVersion).Info("Agent is up to date")
}
}()
+ // Wait for the update-check goroutine to finish before returning (the context above only bounds the initial delay, not the check itself)
+ wg.Wait()
}
// Collect and send integration data (Docker, etc.) separately
@@ -305,11 +323,23 @@ func sendIntegrationData() {
// Register available integrations
integrationMgr.Register(docker.New(logger))
+
+ // Only register compliance integration if not set to on-demand only
+ // When compliance_on_demand_only is true, compliance scans will only run when triggered from the UI
+ if !cfgManager.IsComplianceOnDemandOnly() {
+ complianceInteg := compliance.New(logger)
+ complianceInteg.SetDockerIntegrationEnabled(cfgManager.IsIntegrationEnabled("docker"))
+ integrationMgr.Register(complianceInteg)
+ } else {
+ logger.Info("Skipping compliance scan during scheduled report (compliance_on_demand_only=true)")
+ }
// Future: integrationMgr.Register(proxmox.New(logger))
// Future: integrationMgr.Register(kubernetes.New(logger))
// Discover and collect from all available integrations
- ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ // 25-minute timeout to allow OpenSCAP scans to complete (they can take 15+ minutes on complex systems)
+ // This gives time for both OpenSCAP and Docker Bench to complete
+ ctx, cancel := context.WithTimeout(context.Background(), 25*time.Minute)
defer cancel()
integrationData := integrationMgr.CollectAll(ctx)
@@ -332,6 +362,11 @@ func sendIntegrationData() {
sendDockerData(httpClient, dockerData, hostname, machineID)
}
+ // Send Compliance data if available
+ if complianceData, exists := integrationData["compliance"]; exists && complianceData.Error == "" {
+ sendComplianceData(httpClient, complianceData, hostname, machineID)
+ }
+
// Future: Send other integration data here
}
@@ -375,3 +410,49 @@ func sendDockerData(httpClient *client.Client, integrationData *models.Integrati
"updates": response.UpdatesFound,
}).Info("Docker data sent successfully")
}
+
+// sendComplianceData sends compliance scan data to server
+func sendComplianceData(httpClient *client.Client, integrationData *models.IntegrationData, hostname, machineID string) {
+ // Extract Compliance data from integration data
+ complianceData, ok := integrationData.Data.(*models.ComplianceData)
+ if !ok {
+ logger.Warn("Failed to extract compliance data from integration")
+ return
+ }
+
+ if len(complianceData.Scans) == 0 {
+ logger.Debug("No compliance scans to send")
+ return
+ }
+
+ payload := &models.CompliancePayload{
+ ComplianceData: *complianceData,
+ Hostname: hostname,
+ MachineID: machineID,
+ AgentVersion: version.Version,
+ }
+
+ totalRules := 0
+ for _, scan := range complianceData.Scans {
+ totalRules += scan.TotalRules
+ }
+
+ logger.WithFields(logrus.Fields{
+ "scans": len(complianceData.Scans),
+ "total_rules": totalRules,
+ }).Info("Sending compliance data to server...")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) // Longer timeout for compliance
+ defer cancel()
+
+ response, err := httpClient.SendComplianceData(ctx, payload)
+ if err != nil {
+ logger.WithError(err).Warn("Failed to send compliance data (will retry on next report)")
+ return
+ }
+
+ logger.WithFields(logrus.Fields{
+ "scans_received": response.ScansReceived,
+ "message": response.Message,
+ }).Info("Compliance data sent successfully")
+}
diff --git a/cmd/patchmon-agent/commands/root.go b/cmd/patchmon-agent/commands/root.go
index caef737..0b544dd 100644
--- a/cmd/patchmon-agent/commands/root.go
+++ b/cmd/patchmon-agent/commands/root.go
@@ -24,8 +24,9 @@ var (
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
- Use: "patchmon-agent",
- Short: "PatchMon Agent for package monitoring",
+ Use: "patchmon-agent",
+ Short: "PatchMon Agent for package monitoring",
+ Version: version.Version,
Long: `PatchMon Agent v` + version.Version + `
A monitoring agent that sends package information to PatchMon.`,
@@ -87,7 +88,8 @@ func initialiseAgent() {
if logFile == "" {
logFile = config.DefaultLogFile
}
- _ = os.MkdirAll(filepath.Dir(logFile), 0755)
+ // SECURITY: Use 0750 for log directory (no world access)
+ _ = os.MkdirAll(filepath.Dir(logFile), 0750)
logger.SetOutput(&lumberjack.Logger{Filename: logFile, MaxSize: 10, MaxBackups: 5, MaxAge: 14, Compress: true})
}
@@ -132,4 +134,3 @@ func checkRoot() error {
}
return nil
}
-
diff --git a/cmd/patchmon-agent/commands/serve.go b/cmd/patchmon-agent/commands/serve.go
index 1d163af..0b20ec4 100644
--- a/cmd/patchmon-agent/commands/serve.go
+++ b/cmd/patchmon-agent/commands/serve.go
@@ -2,19 +2,27 @@ package commands
import (
"context"
+ "crypto/rand"
"crypto/tls"
+ "encoding/hex"
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
+ "path/filepath"
+ "regexp"
"strings"
+ "syscall"
"time"
"patchmon-agent/internal/client"
"patchmon-agent/internal/integrations"
+ "patchmon-agent/internal/integrations/compliance"
"patchmon-agent/internal/integrations/docker"
+ "patchmon-agent/internal/system"
"patchmon-agent/internal/utils"
+ "patchmon-agent/internal/version"
"patchmon-agent/pkg/models"
"github.com/gorilla/websocket"
@@ -154,15 +162,7 @@ func runService() error {
logger.Info("✅ Startup notification sent to server")
}
- // initial report on boot
- logger.Info("Sending initial report on startup...")
- if err := sendReport(false); err != nil {
- logger.WithError(err).Warn("initial report failed")
- } else {
- logger.Info("✅ Initial report sent successfully")
- }
-
- // start websocket loop
+ // Start websocket loop FIRST so agent appears online immediately
logger.Info("Establishing WebSocket connection...")
messages := make(chan wsMsg, 10)
dockerEvents := make(chan interface{}, 100)
@@ -171,7 +171,24 @@ func runService() error {
// Start integration monitoring (Docker real-time events, etc.)
startIntegrationMonitoring(ctx, dockerEvents)
- // Create ticker with initial interval
+ // Report current integration status on startup (wait a moment for WebSocket)
+ go func() {
+ time.Sleep(2 * time.Second)
+ reportIntegrationStatus(ctx)
+ }()
+
+ // Run initial report in background so it doesn't block WebSocket
+ // Compliance scans can take 5-10 minutes, and we don't want the agent to appear offline
+ go func() {
+ logger.Info("Sending initial report on startup (background)...")
+ if err := sendReport(false); err != nil {
+ logger.WithError(err).Warn("initial report failed")
+ } else {
+ logger.Info("✅ Initial report sent successfully")
+ }
+ }()
+
+ // Create ticker with initial interval for package reports
ticker := time.NewTicker(time.Duration(intervalMinutes) * time.Minute)
defer ticker.Stop()
@@ -245,6 +262,12 @@ func runService() error {
if err := updateAgent(); err != nil {
logger.WithError(err).Warn("update_agent failed")
}
+ case "refresh_integration_status":
+ logger.Info("Refreshing integration status on server request...")
+ go reportIntegrationStatus(ctx)
+ case "docker_inventory_refresh":
+ logger.Info("Refreshing Docker inventory on server request...")
+ go refreshDockerInventory(ctx)
case "update_notification":
logger.WithField("version", m.version).Info("Update notification received from server")
if m.force {
@@ -264,11 +287,356 @@ func runService() error {
"enabled": m.integrationEnabled,
}).Info("Integration toggled successfully, service will restart")
}
+ case "compliance_scan":
+ logger.WithFields(map[string]interface{}{
+ "profile_type": m.profileType,
+ "profile_id": m.profileID,
+ "enable_remediation": m.enableRemediation,
+ }).Info("Running on-demand compliance scan...")
+ go func(msg wsMsg) {
+ options := &models.ComplianceScanOptions{
+ ProfileID: msg.profileID,
+ EnableRemediation: msg.enableRemediation,
+ FetchRemoteResources: msg.fetchRemoteResources,
+ }
+ if err := runComplianceScanWithOptions(options); err != nil {
+ logger.WithError(err).Warn("compliance_scan failed")
+ } else {
+ if msg.enableRemediation {
+ logger.Info("On-demand compliance scan with remediation completed successfully")
+ } else {
+ logger.Info("On-demand compliance scan completed successfully")
+ }
+ }
+ }(m)
+ case "upgrade_ssg":
+ logger.Info("Upgrading SSG content packages...")
+ go func() {
+ if err := upgradeSSGContent(); err != nil {
+ logger.WithError(err).Warn("upgrade_ssg failed")
+ } else {
+ logger.Info("SSG content packages upgraded successfully")
+ }
+ }()
+ case "remediate_rule":
+ logger.WithField("rule_id", m.ruleID).Info("Remediating single rule...")
+ go func(ruleID string) {
+ if err := remediateSingleRule(ruleID); err != nil {
+ logger.WithError(err).WithField("rule_id", ruleID).Warn("remediate_rule failed")
+ } else {
+ logger.WithField("rule_id", ruleID).Info("Single rule remediation completed")
+ }
+ }(m.ruleID)
+ case "docker_image_scan":
+ logger.WithFields(map[string]interface{}{
+ "image_name": m.imageName,
+ "container_name": m.containerName,
+ "scan_all_images": m.scanAllImages,
+ }).Info("Running Docker image CVE scan...")
+ go func(msg wsMsg) {
+ if err := runDockerImageScan(msg.imageName, msg.containerName, msg.scanAllImages); err != nil {
+ logger.WithError(err).Warn("docker_image_scan failed")
+ } else {
+ logger.Info("Docker image CVE scan completed successfully")
+ }
+ }(m)
+ case "set_compliance_on_demand_only":
+ logger.WithField("on_demand_only", m.complianceOnDemandOnly).Info("Setting compliance on-demand only mode...")
+ if err := cfgManager.SetComplianceOnDemandOnly(m.complianceOnDemandOnly); err != nil {
+ logger.WithError(err).Warn("Failed to set compliance_on_demand_only")
+ } else {
+ logger.WithField("on_demand_only", m.complianceOnDemandOnly).Info("Compliance on-demand only mode updated in config.yml")
+ }
}
}
}
}
+// upgradeSSGContent upgrades the SCAP Security Guide content packages
+func upgradeSSGContent() error {
+ // Create compliance integration to access the OpenSCAP scanner
+ complianceInteg := compliance.New(logger)
+ if err := complianceInteg.UpgradeSSGContent(); err != nil {
+ return err
+ }
+
+ // Send updated status to backend after successful upgrade
+ logger.Info("Sending updated compliance status to backend...")
+ httpClient := client.New(cfgManager, logger)
+ ctx := context.Background()
+
+ // Get new scanner details
+ openscapScanner := compliance.NewOpenSCAPScanner(logger)
+ scannerDetails := openscapScanner.GetScannerDetails()
+
+ // Check if Docker integration is enabled for Docker Bench and oscap-docker info
+ dockerIntegrationEnabled := cfgManager.IsIntegrationEnabled("docker")
+ if dockerIntegrationEnabled {
+ dockerBenchScanner := compliance.NewDockerBenchScanner(logger)
+ scannerDetails.DockerBenchAvailable = dockerBenchScanner.IsAvailable()
+
+ oscapDockerScanner := compliance.NewOscapDockerScanner(logger)
+ scannerDetails.OscapDockerAvailable = oscapDockerScanner.IsAvailable()
+ }
+
+ // Send updated status
+ if err := httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "compliance",
+ Enabled: cfgManager.IsIntegrationEnabled("compliance"),
+ Status: "ready",
+ Message: "SSG content upgraded successfully",
+ ScannerInfo: scannerDetails,
+ }); err != nil {
+ logger.WithError(err).Warn("Failed to send updated compliance status")
+ // Don't fail the upgrade just because status update failed
+ } else {
+ logger.Info("Updated compliance status sent to backend")
+ }
+
+ return nil
+}
+
+// remediateSingleRule remediates a single failed compliance rule
+func remediateSingleRule(ruleID string) error {
+ if ruleID == "" {
+ return fmt.Errorf("rule ID is required")
+ }
+
+ logger.WithField("rule_id", ruleID).Info("Starting single rule remediation")
+
+ // Create compliance integration to run remediation
+ complianceInteg := compliance.New(logger)
+ if !complianceInteg.IsAvailable() {
+ return fmt.Errorf("compliance scanning not available on this system")
+ }
+
+ // Run scan with remediation for just this rule
+ // Use level1_server as the default profile - it contains most common rules
+ // The --rule flag will filter to just the specified rule
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+
+ options := &models.ComplianceScanOptions{
+ ProfileID: "level1_server", // Use default CIS Level 1 Server profile
+ RuleID: ruleID, // Filter to this specific rule
+ EnableRemediation: true,
+ }
+
+ logger.WithFields(map[string]interface{}{
+ "profile_id": options.ProfileID,
+ "rule_id": options.RuleID,
+ }).Info("Running single rule remediation with oscap")
+
+ _, err := complianceInteg.CollectWithOptions(ctx, options)
+ if err != nil {
+ return fmt.Errorf("remediation failed: %w", err)
+ }
+
+ logger.WithField("rule_id", ruleID).Info("Single rule remediation completed successfully")
+ return nil
+}
+
+// reportIntegrationStatus reports the current status of all enabled integrations
+// This ensures the server knows about integration states and scanner capabilities
+// Called on startup and periodically based on server settings
+func reportIntegrationStatus(ctx context.Context) {
+ logger.Debug("Reporting integration status...")
+
+ // Create HTTP client for API calls
+ httpClient := client.New(cfgManager, logger)
+
+ // Report compliance integration status if enabled
+ if cfgManager.IsIntegrationEnabled("compliance") {
+ // Create scanners to check actual availability
+ openscapScanner := compliance.NewOpenSCAPScanner(logger)
+ dockerBenchScanner := compliance.NewDockerBenchScanner(logger)
+ oscapDockerScanner := compliance.NewOscapDockerScanner(logger)
+
+ // Get scanner details (includes OS info, profiles, etc.)
+ scannerDetails := openscapScanner.GetScannerDetails()
+
+ // Build components status map based on ACTUAL availability
+ components := make(map[string]string)
+
+ // Check OpenSCAP availability
+ if openscapScanner.IsAvailable() {
+ components["openscap"] = "ready"
+ } else {
+ components["openscap"] = "failed"
+ }
+
+ // Check Docker integration and related tools
+ dockerIntegrationEnabled := cfgManager.IsIntegrationEnabled("docker")
+ scannerDetails.DockerBenchAvailable = dockerBenchScanner.IsAvailable()
+
+ if dockerIntegrationEnabled {
+ if dockerBenchScanner.IsAvailable() {
+ components["docker-bench"] = "ready"
+ scannerDetails.AvailableProfiles = append(scannerDetails.AvailableProfiles, models.ScanProfileInfo{
+ ID: "docker-bench",
+ Name: "Docker Bench for Security",
+ Description: "CIS Docker Benchmark security checks",
+ Type: "docker-bench",
+ })
+ } else {
+ components["docker-bench"] = "failed"
+ }
+
+ // Check oscap-docker for container image CVE scanning
+ scannerDetails.OscapDockerAvailable = oscapDockerScanner.IsAvailable()
+ if oscapDockerScanner.IsAvailable() {
+ components["oscap-docker"] = "ready"
+ scannerDetails.AvailableProfiles = append(scannerDetails.AvailableProfiles, models.ScanProfileInfo{
+ ID: "docker-image-cve",
+ Name: "Docker Image CVE Scan",
+ Description: "Scan Docker images for known CVEs using OpenSCAP",
+ Type: "oscap-docker",
+ Category: "docker",
+ })
+ } else {
+ // Check if we're on Ubuntu/Debian where oscap-docker is not supported
+ if _, err := exec.LookPath("apt-get"); err == nil {
+ // Ubuntu/Debian - oscap-docker requires 'atomic' package which isn't available
+ components["oscap-docker"] = "unavailable"
+ } else {
+ components["oscap-docker"] = "failed"
+ }
+ }
+ } else {
+ // Docker integration not enabled - mark as unavailable (not failed)
+ components["docker-bench"] = "unavailable"
+ components["oscap-docker"] = "unavailable"
+ }
+
+ // Determine overall status based on component statuses
+ overallStatus := "ready"
+ statusMessage := "Compliance tools ready"
+ hasReady := false
+ hasFailed := false
+
+ for _, status := range components {
+ if status == "ready" {
+ hasReady = true
+ }
+ if status == "failed" {
+ hasFailed = true
+ }
+ }
+
+ if hasFailed && hasReady {
+ overallStatus = "partial"
+ statusMessage = "Some compliance tools failed to install"
+ } else if hasFailed && !hasReady {
+ overallStatus = "error"
+ statusMessage = "All compliance tools failed to install"
+ }
+
+ if err := httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "compliance",
+ Enabled: true,
+ Status: overallStatus,
+ Message: statusMessage,
+ Components: components,
+ ScannerInfo: scannerDetails,
+ }); err != nil {
+ logger.WithError(err).Warn("Failed to report compliance status on startup")
+ } else {
+ logger.WithField("status", overallStatus).Info("✅ Compliance integration status reported")
+ }
+ }
+
+ // Report docker integration status if enabled
+ if cfgManager.IsIntegrationEnabled("docker") {
+ dockerInteg := docker.New(logger)
+ if dockerInteg.IsAvailable() {
+ if err := httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "docker",
+ Enabled: true,
+ Status: "ready",
+ Message: "Docker monitoring ready",
+ }); err != nil {
+ logger.WithError(err).Warn("Failed to report docker status on startup")
+ } else {
+ logger.Info("✅ Docker integration status reported")
+ }
+ }
+ }
+}
+
+// refreshDockerInventory collects and sends Docker inventory data on demand
+// Called when the server requests a Docker data refresh
+func refreshDockerInventory(ctx context.Context) {
+ logger.Info("Starting Docker inventory refresh...")
+
+ // Check if Docker integration is enabled
+ if !cfgManager.IsIntegrationEnabled("docker") {
+ logger.Warn("Docker integration is not enabled, skipping refresh")
+ return
+ }
+
+ // Create Docker integration
+ dockerInteg := docker.New(logger)
+ if !dockerInteg.IsAvailable() {
+ logger.Warn("Docker is not available on this system")
+ return
+ }
+
+ // Collect Docker data with timeout
+ collectCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+ defer cancel()
+
+ dockerData, err := dockerInteg.Collect(collectCtx)
+ if err != nil {
+ logger.WithError(err).Warn("Failed to collect Docker data")
+ return
+ }
+
+ // Get system info for payload
+ systemDetector := system.New(logger)
+ hostname, _ := systemDetector.GetHostname()
+ machineID := systemDetector.GetMachineID()
+
+ // Extract Docker data from integration data
+ data, ok := dockerData.Data.(*models.DockerData)
+ if !ok {
+ logger.Warn("Failed to extract Docker data from integration")
+ return
+ }
+
+ // Create payload
+ payload := &models.DockerPayload{
+ DockerData: *data,
+ Hostname: hostname,
+ MachineID: machineID,
+ AgentVersion: version.Version,
+ }
+
+ logger.WithFields(map[string]interface{}{
+ "containers": len(data.Containers),
+ "images": len(data.Images),
+ "volumes": len(data.Volumes),
+ "networks": len(data.Networks),
+ }).Info("Sending Docker inventory to server...")
+
+ // Create HTTP client and send data
+ httpClient := client.New(cfgManager, logger)
+ sendCtx, sendCancel := context.WithTimeout(ctx, 30*time.Second)
+ defer sendCancel()
+
+ response, err := httpClient.SendDockerData(sendCtx, payload)
+ if err != nil {
+ logger.WithError(err).Warn("Failed to send Docker inventory")
+ return
+ }
+
+ logger.WithFields(map[string]interface{}{
+ "containers": response.ContainersReceived,
+ "images": response.ImagesReceived,
+ "volumes": response.VolumesReceived,
+ "networks": response.NetworksReceived,
+ }).Info("Docker inventory refresh completed successfully")
+}
+
// startIntegrationMonitoring starts real-time monitoring for integrations that support it
func startIntegrationMonitoring(ctx context.Context, eventChan chan<- interface{}) {
// Create integration manager
@@ -298,14 +666,104 @@ func startIntegrationMonitoring(ctx context.Context, eventChan chan<- interface{
}
type wsMsg struct {
- kind string
- interval int
- version string
- force bool
- integrationName string
- integrationEnabled bool
+ kind string
+ interval int
+ version string
+ force bool
+ integrationName string
+ integrationEnabled bool
+ profileType string // For compliance_scan: openscap, docker-bench, all
+ profileID string // For compliance_scan: specific XCCDF profile ID
+ enableRemediation bool // For compliance_scan: enable auto-remediation
+ fetchRemoteResources bool // For compliance_scan: fetch remote resources
+ ruleID string // For remediate_rule: specific rule ID to remediate
+ imageName string // For docker_image_scan: Docker image to scan
+ containerName string // For docker_image_scan: Docker container to scan
+ scanAllImages bool // For docker_image_scan: scan all images on system
+ complianceOnDemandOnly bool // For set_compliance_on_demand_only
+}
+
+// Input validation patterns for WebSocket message fields
+// These prevent command injection by ensuring only safe characters are allowed
+var (
+ // Profile IDs: alphanumeric, underscores, dots, hyphens (e.g., xccdf_org.ssgproject.content_profile_level1_server)
+ validProfileIDPattern = regexp.MustCompile(`^[a-zA-Z0-9_.\-]+$`)
+ // Rule IDs: same as profile IDs (e.g., xccdf_org.ssgproject.content_rule_audit_rules_...)
+ validRuleIDPattern = regexp.MustCompile(`^[a-zA-Z0-9_.\-]+$`)
+ // Docker image names: alphanumeric, slashes, colons, dots, hyphens, underscores (e.g., ubuntu:22.04, myregistry.io/app:v1)
+ validDockerImagePattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.\-/:@]*$`)
+ // Docker container names: alphanumeric, underscores, hyphens (e.g., my-container, container_1)
+ validDockerContainerPattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_\-]*$`)
+)
+
+// validateProfileID validates a compliance profile ID to prevent command injection
+func validateProfileID(profileID string) error {
+ if profileID == "" {
+ return nil // Empty is allowed - will use default
+ }
+ if len(profileID) > 256 {
+ return fmt.Errorf("profile ID too long (max 256 chars)")
+ }
+ if !validProfileIDPattern.MatchString(profileID) {
+ return fmt.Errorf("invalid profile ID: contains disallowed characters")
+ }
+ return nil
}
+// validateRuleID validates a compliance rule ID to prevent command injection
+func validateRuleID(ruleID string) error {
+ if ruleID == "" {
+ return fmt.Errorf("rule ID is required")
+ }
+ if len(ruleID) > 256 {
+ return fmt.Errorf("rule ID too long (max 256 chars)")
+ }
+ if !validRuleIDPattern.MatchString(ruleID) {
+ return fmt.Errorf("invalid rule ID: contains disallowed characters")
+ }
+ return nil
+}
+
+// validateDockerImageName validates a Docker image name to prevent command injection
+func validateDockerImageName(imageName string) error {
+ if imageName == "" {
+ return nil // Empty is allowed when scanning all images
+ }
+ if len(imageName) > 512 {
+ return fmt.Errorf("image name too long (max 512 chars)")
+ }
+ if !validDockerImagePattern.MatchString(imageName) {
+ return fmt.Errorf("invalid Docker image name: contains disallowed characters")
+ }
+ return nil
+}
+
+// validateDockerContainerName validates a Docker container name to prevent command injection
+func validateDockerContainerName(containerName string) error {
+ if containerName == "" {
+ return nil // Empty is allowed when scanning images
+ }
+ if len(containerName) > 256 {
+ return fmt.Errorf("container name too long (max 256 chars)")
+ }
+ if !validDockerContainerPattern.MatchString(containerName) {
+ return fmt.Errorf("invalid Docker container name: contains disallowed characters")
+ }
+ return nil
+}
+
+// ComplianceScanProgress represents a progress update during compliance scanning
+type ComplianceScanProgress struct {
+ Phase string `json:"phase"` // started, evaluating, parsing, completed, failed
+ ProfileName string `json:"profile_name"` // Name of the profile being scanned
+ Message string `json:"message"` // Human-readable progress message
+ Progress float64 `json:"progress"` // 0-100 percentage (approximate)
+ Error string `json:"error,omitempty"`
+}
+
+// Global channel for compliance scan progress updates
+var complianceProgressChan = make(chan ComplianceScanProgress, 10)
+
func wsLoop(out chan<- wsMsg, dockerEvents <-chan interface{}) {
backoff := time.Second
for {
@@ -352,29 +810,57 @@ func connectOnce(out chan<- wsMsg, dockerEvents <-chan interface{}) error {
header.Set("X-API-ID", apiID)
header.Set("X-API-KEY", apiKey)
- // Configure WebSocket dialer for insecure connections if needed
+ // SECURITY: Configure WebSocket dialer for insecure connections if needed
+ // WARNING: This exposes the agent to man-in-the-middle attacks!
dialer := websocket.DefaultDialer
if cfgManager.GetConfig().SkipSSLVerify {
+ // SECURITY: Block skip_ssl_verify in production environments
+ if utils.IsProductionEnvironment() {
+ logger.Error("╔══════════════════════════════════════════════════════════════════╗")
+ logger.Error("║ SECURITY ERROR: skip_ssl_verify is BLOCKED in production! ║")
+ logger.Error("║ Set PATCHMON_ENV to 'development' to enable insecure mode. ║")
+ logger.Error("║ This setting cannot be used when PATCHMON_ENV=production ║")
+ logger.Error("╚══════════════════════════════════════════════════════════════════╝")
+ logger.Fatal("Refusing to start with skip_ssl_verify=true in production environment")
+ }
+
+ logger.Error("╔══════════════════════════════════════════════════════════════════╗")
+ logger.Error("║ SECURITY WARNING: TLS verification DISABLED for WebSocket! ║")
+ logger.Error("║ Commands from server could be intercepted or modified. ║")
+ logger.Error("║ Use a valid TLS certificate in production! ║")
+ logger.Error("╚══════════════════════════════════════════════════════════════════╝")
dialer = &websocket.Dialer{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
- logger.Warn("⚠️ SSL certificate verification is disabled for WebSocket")
}
conn, _, err := dialer.Dial(wsURL, header)
if err != nil {
return err
}
- defer func() { _ = conn.Close() }()
- // ping loop
+ // Create a done channel to signal goroutines to stop when connection closes
+ done := make(chan struct{})
+ defer func() {
+ close(done) // Signal all goroutines to stop
+ _ = conn.Close()
+ }()
+
+ // ping loop - now with cancellation support
go func() {
t := time.NewTicker(30 * time.Second)
defer t.Stop()
- for range t.C {
- _ = conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(5*time.Second))
+ for {
+ select {
+ case <-done:
+ return
+ case <-t.C:
+ if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(5*time.Second)); err != nil {
+ return // Connection closed, exit goroutine
+ }
+ }
}
}()
@@ -384,29 +870,76 @@ func connectOnce(out chan<- wsMsg, dockerEvents <-chan interface{}) error {
return conn.SetReadDeadline(time.Now().Add(90 * time.Second))
})
+ // SECURITY: Limit WebSocket message size to prevent DoS attacks (64KB max)
+ conn.SetReadLimit(64 * 1024)
+
logger.WithField("url", wsURL).Info("WebSocket connected")
- // Create a goroutine to send Docker events through WebSocket
+ // Create a goroutine to send Docker events through WebSocket - with cancellation support.
+ // NOTE(review): this goroutine and the compliance-progress goroutine below both call
+ // conn.WriteMessage on the same connection; gorilla/websocket permits at most ONE
+ // concurrent writer (only WriteControl may be called concurrently) — confirm these
+ // writes are serialized, e.g. via a shared outbound channel or a write mutex.
+ go func() {
+ for {
+ select {
+ case <-done:
+ return
+ case event, ok := <-dockerEvents:
+ if !ok {
+ return // Channel closed
+ }
+ if dockerEvent, ok := event.(models.DockerStatusEvent); ok {
+ eventJSON, err := json.Marshal(map[string]interface{}{
+ "type": "docker_status",
+ "event": dockerEvent,
+ "container_id": dockerEvent.ContainerID,
+ "name": dockerEvent.Name,
+ "status": dockerEvent.Status,
+ "timestamp": dockerEvent.Timestamp,
+ })
+ if err != nil {
+ logger.WithError(err).Warn("Failed to marshal Docker event")
+ continue
+ }
+
+ if err := conn.WriteMessage(websocket.TextMessage, eventJSON); err != nil {
+ logger.WithError(err).Debug("Failed to send Docker event via WebSocket")
+ return
+ }
+ }
+ }
+ }
+ }()
+
+ // Create a goroutine to send compliance scan progress updates through WebSocket
go func() {
- for event := range dockerEvents {
- if dockerEvent, ok := event.(models.DockerStatusEvent); ok {
- eventJSON, err := json.Marshal(map[string]interface{}{
- "type": "docker_status",
- "event": dockerEvent,
- "container_id": dockerEvent.ContainerID,
- "name": dockerEvent.Name,
- "status": dockerEvent.Status,
- "timestamp": dockerEvent.Timestamp,
+ for {
+ select {
+ case <-done:
+ return
+ case progress, ok := <-complianceProgressChan:
+ if !ok {
+ return // Channel closed
+ }
+ progressJSON, err := json.Marshal(map[string]interface{}{
+ "type": "compliance_scan_progress",
+ "phase": progress.Phase,
+ "profile_name": progress.ProfileName,
+ "message": progress.Message,
+ "progress": progress.Progress,
+ "error": progress.Error,
+ "timestamp": time.Now().Format(time.RFC3339),
})
if err != nil {
- logger.WithError(err).Warn("Failed to marshal Docker event")
+ logger.WithError(err).Warn("Failed to marshal compliance progress event")
continue
}
- if err := conn.WriteMessage(websocket.TextMessage, eventJSON); err != nil {
- logger.WithError(err).Debug("Failed to send Docker event via WebSocket")
+ if err := conn.WriteMessage(websocket.TextMessage, progressJSON); err != nil {
+ logger.WithError(err).Debug("Failed to send compliance progress via WebSocket")
return
}
+ logger.WithFields(map[string]interface{}{
+ "phase": progress.Phase,
+ "message": progress.Message,
+ }).Debug("Sent compliance progress update via WebSocket")
}
}
}()
@@ -416,47 +949,131 @@ func connectOnce(out chan<- wsMsg, dockerEvents <-chan interface{}) error {
if err != nil {
return err
}
+ logger.WithField("raw_message", string(data)).Debug("WebSocket message received")
var payload struct {
- Type string `json:"type"`
- UpdateInterval int `json:"update_interval"`
- Version string `json:"version"`
- Force bool `json:"force"`
- Message string `json:"message"`
- Integration string `json:"integration"`
- Enabled bool `json:"enabled"`
- }
- if json.Unmarshal(data, &payload) == nil {
- switch payload.Type {
- case "settings_update":
- logger.WithField("interval", payload.UpdateInterval).Info("settings_update received")
- out <- wsMsg{kind: "settings_update", interval: payload.UpdateInterval}
- case "report_now":
- logger.Info("report_now received")
- out <- wsMsg{kind: "report_now"}
- case "update_agent":
- logger.Info("update_agent received")
- out <- wsMsg{kind: "update_agent"}
- case "update_notification":
- logger.WithFields(map[string]interface{}{
- "version": payload.Version,
- "force": payload.Force,
- "message": payload.Message,
- }).Info("update_notification received")
- out <- wsMsg{
- kind: "update_notification",
- version: payload.Version,
- force: payload.Force,
- }
- case "integration_toggle":
- logger.WithFields(map[string]interface{}{
- "integration": payload.Integration,
- "enabled": payload.Enabled,
- }).Info("integration_toggle received")
- out <- wsMsg{
- kind: "integration_toggle",
- integrationName: payload.Integration,
- integrationEnabled: payload.Enabled,
- }
+ Type string `json:"type"`
+ UpdateInterval int `json:"update_interval"`
+ Version string `json:"version"`
+ Force bool `json:"force"`
+ Message string `json:"message"`
+ Integration string `json:"integration"`
+ Enabled bool `json:"enabled"`
+ ProfileType string `json:"profile_type"` // For compliance_scan
+ ProfileID string `json:"profile_id"` // For compliance_scan: specific XCCDF profile ID
+ EnableRemediation bool `json:"enable_remediation"` // For compliance_scan
+ FetchRemoteResources bool `json:"fetch_remote_resources"` // For compliance_scan
+ RuleID string `json:"rule_id"` // For remediate_rule: specific rule to remediate
+ ImageName string `json:"image_name"` // For docker_image_scan: Docker image to scan
+ ContainerName string `json:"container_name"` // For docker_image_scan: container to scan
+ ScanAllImages bool `json:"scan_all_images"` // For docker_image_scan: scan all images
+ OnDemandOnly bool `json:"on_demand_only"` // For set_compliance_on_demand_only
+ }
+ if err := json.Unmarshal(data, &payload); err != nil {
+ logger.WithError(err).WithField("data", string(data)).Warn("Failed to parse WebSocket message")
+ continue
+ }
+ logger.WithField("type", payload.Type).Debug("Parsed WebSocket message type")
+ switch payload.Type {
+ case "settings_update":
+ logger.WithField("interval", payload.UpdateInterval).Info("settings_update received")
+ out <- wsMsg{kind: "settings_update", interval: payload.UpdateInterval}
+ case "report_now":
+ logger.Info("report_now received")
+ out <- wsMsg{kind: "report_now"}
+ case "update_agent":
+ logger.Info("update_agent received")
+ out <- wsMsg{kind: "update_agent"}
+ case "refresh_integration_status":
+ logger.Info("refresh_integration_status received")
+ out <- wsMsg{kind: "refresh_integration_status"}
+ case "docker_inventory_refresh":
+ logger.Info("docker_inventory_refresh received")
+ out <- wsMsg{kind: "docker_inventory_refresh"}
+ case "update_notification":
+ logger.WithFields(map[string]interface{}{
+ "version": payload.Version,
+ "force": payload.Force,
+ "message": payload.Message,
+ }).Info("update_notification received")
+ out <- wsMsg{
+ kind: "update_notification",
+ version: payload.Version,
+ force: payload.Force,
+ }
+ case "integration_toggle":
+ logger.WithFields(map[string]interface{}{
+ "integration": payload.Integration,
+ "enabled": payload.Enabled,
+ }).Info("integration_toggle received")
+ out <- wsMsg{
+ kind: "integration_toggle",
+ integrationName: payload.Integration,
+ integrationEnabled: payload.Enabled,
+ }
+ case "compliance_scan":
+ // Validate profile ID to prevent command injection
+ if err := validateProfileID(payload.ProfileID); err != nil {
+ logger.WithError(err).WithField("profile_id", payload.ProfileID).Warn("Invalid profile ID in compliance_scan message")
+ continue
+ }
+ profileType := payload.ProfileType
+ if profileType == "" {
+ profileType = "all"
+ }
+ logger.WithFields(map[string]interface{}{
+ "profile_type": profileType,
+ "profile_id": payload.ProfileID,
+ "enable_remediation": payload.EnableRemediation,
+ }).Info("compliance_scan received")
+ out <- wsMsg{
+ kind: "compliance_scan",
+ profileType: profileType,
+ profileID: payload.ProfileID,
+ enableRemediation: payload.EnableRemediation,
+ fetchRemoteResources: payload.FetchRemoteResources,
+ }
+ case "upgrade_ssg":
+ logger.Info("upgrade_ssg received from WebSocket")
+ out <- wsMsg{kind: "upgrade_ssg"}
+ logger.Info("upgrade_ssg sent to message channel")
+ case "remediate_rule":
+ // Validate rule ID to prevent command injection
+ if err := validateRuleID(payload.RuleID); err != nil {
+ logger.WithError(err).WithField("rule_id", payload.RuleID).Warn("Invalid rule ID in remediate_rule message")
+ continue
+ }
+ logger.WithField("rule_id", payload.RuleID).Info("remediate_rule received")
+ out <- wsMsg{kind: "remediate_rule", ruleID: payload.RuleID}
+ case "docker_image_scan":
+ // Validate Docker image and container names to prevent command injection
+ if err := validateDockerImageName(payload.ImageName); err != nil {
+ logger.WithError(err).WithField("image_name", payload.ImageName).Warn("Invalid image name in docker_image_scan message")
+ continue
+ }
+ if err := validateDockerContainerName(payload.ContainerName); err != nil {
+ logger.WithError(err).WithField("container_name", payload.ContainerName).Warn("Invalid container name in docker_image_scan message")
+ continue
+ }
+ logger.WithFields(map[string]interface{}{
+ "image_name": payload.ImageName,
+ "container_name": payload.ContainerName,
+ "scan_all_images": payload.ScanAllImages,
+ }).Info("docker_image_scan received")
+ out <- wsMsg{
+ kind: "docker_image_scan",
+ imageName: payload.ImageName,
+ containerName: payload.ContainerName,
+ scanAllImages: payload.ScanAllImages,
+ }
+ case "set_compliance_on_demand_only":
+ logger.WithField("on_demand_only", payload.OnDemandOnly).Info("set_compliance_on_demand_only received")
+ out <- wsMsg{
+ kind: "set_compliance_on_demand_only",
+ complianceOnDemandOnly: payload.OnDemandOnly,
+ }
+ default:
+ if payload.Type != "" && payload.Type != "connected" {
+ logger.WithField("type", payload.Type).Warn("Unknown WebSocket message type")
}
}
}
@@ -469,6 +1086,253 @@ func toggleIntegration(integrationName string, enabled bool) error {
"enabled": enabled,
}).Info("Toggling integration")
+ // Handle compliance tools installation/removal
+ if integrationName == "compliance" {
+ // Create HTTP client for sending status updates
+ httpClient := client.New(cfgManager, logger)
+ ctx := context.Background()
+
+ components := make(map[string]string)
+ var overallStatus string
+ var statusMessage string
+
+ if enabled {
+ logger.Info("Compliance enabled - installing required tools...")
+ overallStatus = "installing"
+
+ // Send initial "installing" status
+ httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "compliance",
+ Enabled: true,
+ Status: "installing",
+ Message: "Installing compliance tools...",
+ })
+
+ // Install OpenSCAP
+ openscapScanner := compliance.NewOpenSCAPScanner(logger)
+ if err := openscapScanner.EnsureInstalled(); err != nil {
+ logger.WithError(err).Warn("Failed to install OpenSCAP (will try again on next scan)")
+ components["openscap"] = "failed"
+ } else {
+ logger.Info("OpenSCAP installed successfully")
+ components["openscap"] = "ready"
+ }
+
+ // Pre-pull Docker Bench image only if Docker integration is enabled AND Docker is available
+ dockerIntegrationEnabled := cfgManager.IsIntegrationEnabled("docker")
+ if dockerIntegrationEnabled {
+ dockerBenchScanner := compliance.NewDockerBenchScanner(logger)
+ if dockerBenchScanner.IsAvailable() {
+ if err := dockerBenchScanner.EnsureInstalled(); err != nil {
+ logger.WithError(err).Warn("Failed to pre-pull Docker Bench image (will pull on first scan)")
+ components["docker-bench"] = "failed"
+ } else {
+ logger.Info("Docker Bench image pulled successfully")
+ components["docker-bench"] = "ready"
+ }
+ } else {
+ components["docker-bench"] = "unavailable"
+ }
+
+ // Install oscap-docker for container image CVE scanning
+ oscapDockerScanner := compliance.NewOscapDockerScanner(logger)
+ if !oscapDockerScanner.IsAvailable() {
+ if err := oscapDockerScanner.EnsureInstalled(); err != nil {
+ // Check if it's a platform limitation (not available on this OS) vs installation failure
+ errMsg := err.Error()
+ if strings.Contains(errMsg, "not available") || strings.Contains(errMsg, "not supported") {
+ logger.WithError(err).Info("oscap-docker not available on this platform")
+ components["oscap-docker"] = "unavailable"
+ } else {
+ logger.WithError(err).Warn("Failed to install oscap-docker (container CVE scanning won't be available)")
+ components["oscap-docker"] = "failed"
+ }
+ } else {
+ logger.Info("oscap-docker installed successfully")
+ components["oscap-docker"] = "ready"
+ }
+ } else {
+ logger.Info("oscap-docker already available")
+ components["oscap-docker"] = "ready"
+ }
+ } else {
+ logger.Debug("Docker integration not enabled, skipping Docker Bench and oscap-docker setup")
+ // Don't add docker-bench to components at all if integration is not enabled
+ }
+
+ // Determine overall status
+ allReady := true
+ for _, status := range components {
+ if status == "failed" {
+ allReady = false
+ break
+ }
+ }
+ if allReady {
+ overallStatus = "ready"
+ statusMessage = "Compliance tools installed and ready"
+ } else {
+ overallStatus = "partial"
+ statusMessage = "Some compliance tools failed to install"
+ }
+
+ // Get detailed scanner info to send with status
+ scannerDetails := openscapScanner.GetScannerDetails()
+
+ // Add Docker Bench and oscap-docker info if available
+ if dockerIntegrationEnabled {
+ dockerBenchScanner := compliance.NewDockerBenchScanner(logger)
+ scannerDetails.DockerBenchAvailable = dockerBenchScanner.IsAvailable()
+ if scannerDetails.DockerBenchAvailable {
+ scannerDetails.AvailableProfiles = append(scannerDetails.AvailableProfiles, models.ScanProfileInfo{
+ ID: "docker-bench",
+ Name: "Docker Bench for Security",
+ Description: "CIS Docker Benchmark security checks",
+ Type: "docker-bench",
+ })
+ }
+
+ // Add oscap-docker info for container image CVE scanning
+ oscapDockerScanner := compliance.NewOscapDockerScanner(logger)
+ scannerDetails.OscapDockerAvailable = oscapDockerScanner.IsAvailable()
+ if oscapDockerScanner.IsAvailable() {
+ scannerDetails.AvailableProfiles = append(scannerDetails.AvailableProfiles, models.ScanProfileInfo{
+ ID: "docker-image-cve",
+ Name: "Docker Image CVE Scan",
+ Description: "Scan Docker images for known CVEs using OpenSCAP",
+ Type: "oscap-docker",
+ Category: "docker",
+ })
+ }
+ }
+
+ // Send final status with scanner info
+ httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "compliance",
+ Enabled: enabled,
+ Status: overallStatus,
+ Message: statusMessage,
+ Components: components,
+ ScannerInfo: scannerDetails,
+ })
+ return nil // NOTE(review): this early return skips not only the generic status send but also cfgManager.SetIntegrationEnabled further below — confirm the compliance enabled state is persisted elsewhere, or the toggle will not survive an agent restart
+
+ } else {
+ logger.Info("Compliance disabled - removing tools...")
+ overallStatus = "removing"
+
+ // Send initial "removing" status
+ httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "compliance",
+ Enabled: false,
+ Status: "removing",
+ Message: "Removing compliance tools...",
+ })
+
+ // Remove OpenSCAP packages
+ openscapScanner := compliance.NewOpenSCAPScanner(logger)
+ if err := openscapScanner.Cleanup(); err != nil {
+ logger.WithError(err).Warn("Failed to remove OpenSCAP packages")
+ components["openscap"] = "cleanup-failed"
+ } else {
+ logger.Info("OpenSCAP packages removed successfully")
+ components["openscap"] = "removed"
+ }
+
+ // Clean up Docker Bench images
+ dockerBenchScanner := compliance.NewDockerBenchScanner(logger)
+ if dockerBenchScanner.IsAvailable() {
+ if err := dockerBenchScanner.Cleanup(); err != nil {
+ logger.WithError(err).Debug("Failed to cleanup Docker Bench image")
+ components["docker-bench"] = "cleanup-failed"
+ } else {
+ components["docker-bench"] = "removed"
+ }
+ }
+
+ overallStatus = "disabled"
+ statusMessage = "Compliance disabled and tools removed"
+ logger.Info("Compliance cleanup complete")
+
+ // Send final status update for disable
+ httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "compliance",
+ Enabled: enabled,
+ Status: overallStatus,
+ Message: statusMessage,
+ Components: components,
+ })
+ }
+ }
+
+ // Handle Docker Bench and oscap-docker installation when Docker is enabled AND Compliance is already enabled
+ if integrationName == "docker" && enabled {
+ if cfgManager.IsIntegrationEnabled("compliance") {
+ logger.Info("Docker enabled with Compliance already active - setting up Docker scanning tools...")
+ httpClient := client.New(cfgManager, logger)
+ ctx := context.Background()
+
+ openscapScanner := compliance.NewOpenSCAPScanner(logger)
+ scannerDetails := openscapScanner.GetScannerDetails()
+
+ // Setup Docker Bench
+ dockerBenchScanner := compliance.NewDockerBenchScanner(logger)
+ if dockerBenchScanner.IsAvailable() {
+ if err := dockerBenchScanner.EnsureInstalled(); err != nil {
+ logger.WithError(err).Warn("Failed to pre-pull Docker Bench image (will pull on first scan)")
+ } else {
+ logger.Info("Docker Bench image pulled successfully")
+ scannerDetails.DockerBenchAvailable = true
+ scannerDetails.AvailableProfiles = append(scannerDetails.AvailableProfiles, models.ScanProfileInfo{
+ ID: "docker-bench",
+ Name: "Docker Bench for Security",
+ Description: "CIS Docker Benchmark security checks",
+ Type: "docker-bench",
+ })
+ }
+ } else {
+ logger.Warn("Docker daemon not available - Docker Bench cannot be used")
+ }
+
+ // Setup oscap-docker for container image CVE scanning
+ oscapDockerScanner := compliance.NewOscapDockerScanner(logger)
+ if !oscapDockerScanner.IsAvailable() {
+ if err := oscapDockerScanner.EnsureInstalled(); err != nil {
+ logger.WithError(err).Warn("Failed to install oscap-docker (container CVE scanning won't be available)")
+ } else {
+ logger.Info("oscap-docker installed successfully")
+ scannerDetails.OscapDockerAvailable = true
+ scannerDetails.AvailableProfiles = append(scannerDetails.AvailableProfiles, models.ScanProfileInfo{
+ ID: "docker-image-cve",
+ Name: "Docker Image CVE Scan",
+ Description: "Scan Docker images for known CVEs using OpenSCAP",
+ Type: "oscap-docker",
+ Category: "docker",
+ })
+ }
+ } else {
+ logger.Info("oscap-docker already available")
+ scannerDetails.OscapDockerAvailable = true
+ scannerDetails.AvailableProfiles = append(scannerDetails.AvailableProfiles, models.ScanProfileInfo{
+ ID: "docker-image-cve",
+ Name: "Docker Image CVE Scan",
+ Description: "Scan Docker images for known CVEs using OpenSCAP",
+ Type: "oscap-docker",
+ Category: "docker",
+ })
+ }
+
+ // Send updated compliance status with Docker scanning tools
+ httpClient.SendIntegrationSetupStatus(ctx, &models.IntegrationSetupStatus{
+ Integration: "compliance",
+ Enabled: true,
+ Status: "ready",
+ Message: "Docker scanning tools now available",
+ ScannerInfo: scannerDetails,
+ })
+ }
+ }
+
// Update config.yml
if err := cfgManager.SetIntegrationEnabled(integrationName, enabled); err != nil {
return fmt.Errorf("failed to update config: %w", err)
@@ -498,7 +1362,19 @@ func toggleIntegration(integrationName string, enabled bool) error {
// Instead, we'll create a helper script that runs after we exit
logger.Debug("Detected OpenRC, scheduling service restart via helper script")
+ // SECURITY: Ensure /etc/patchmon directory exists with restrictive permissions
+ // Using 0700 to prevent other users from reading/writing to this directory
+ if err := os.MkdirAll("/etc/patchmon", 0700); err != nil {
+ logger.WithError(err).Warn("Failed to create /etc/patchmon directory, will try anyway")
+ }
+
// Create a helper script that will restart the service after we exit
+ // SECURITY: TOCTOU mitigation measures:
+ // 1) Use random suffix to prevent predictable paths
+ // 2) Use O_EXCL flag for atomic creation (fail if file exists)
+ // 3) 0700 permissions on dir and file (owner-only)
+ // 4) Script is deleted immediately after execution
+ // 5) Verify no symlink attacks before execution
helperScript := `#!/bin/sh
# Wait a moment for the current process to exit
sleep 2
@@ -507,27 +1383,69 @@ rc-service patchmon-agent restart 2>&1 || rc-service patchmon-agent start 2>&1
# Clean up this script
rm -f "$0"
`
- helperPath := "/etc/patchmon/patchmon-restart-helper.sh"
- if err := os.WriteFile(helperPath, []byte(helperScript), 0755); err != nil {
+ // Generate random suffix to prevent predictable path attacks
+ randomBytes := make([]byte, 8)
+ if _, err := rand.Read(randomBytes); err != nil {
+ logger.WithError(err).Warn("Failed to generate random suffix, using fallback")
+ randomBytes = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
+ }
+ helperPath := filepath.Join("/etc/patchmon", fmt.Sprintf("restart-%s.sh", hex.EncodeToString(randomBytes)))
+
+ // SECURITY: Verify the directory is not a symlink (prevent symlink attacks)
+ dirInfo, err := os.Lstat("/etc/patchmon")
+ if err == nil && dirInfo.Mode()&os.ModeSymlink != 0 {
+ logger.Warn("Security: /etc/patchmon is a symlink, refusing to create helper script")
+ os.Exit(0) // Exit immediately and rely on OpenRC auto-restart (os.Exit terminates the process; nothing "falls through" past it, and deferred functions do not run)
+ }
+
+ // SECURITY: Use O_EXCL to atomically create file (fail if exists - prevents race conditions)
+ file, err := os.OpenFile(helperPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0700)
+ if err != nil {
logger.WithError(err).Warn("Failed to create restart helper script, will exit and rely on OpenRC auto-restart")
// Fall through to exit approach
} else {
- // Execute the helper script in background (detached from current process)
- // Use 'sh -c' with nohup to ensure it runs after we exit
- cmd := exec.Command("sh", "-c", fmt.Sprintf("nohup %s > /dev/null 2>&1 &", helperPath))
- if err := cmd.Start(); err != nil {
- logger.WithError(err).Warn("Failed to start restart helper script, will exit and rely on OpenRC auto-restart")
- // Clean up script
- if removeErr := os.Remove(helperPath); removeErr != nil {
- logger.WithError(removeErr).Debug("Failed to remove helper script")
- }
+ // Write the script content to the file
+ if _, err := file.WriteString(helperScript); err != nil {
+ logger.WithError(err).Warn("Failed to write restart helper script")
+ file.Close()
+ os.Remove(helperPath)
// Fall through to exit approach
} else {
- logger.Info("Scheduled service restart via helper script, exiting now...")
- // Give the helper script a moment to start
- time.Sleep(500 * time.Millisecond)
- // Exit gracefully - the helper script will restart the service
- os.Exit(0)
+ file.Close()
+
+ // SECURITY: Verify the file we're about to execute is the one we created
+ // Check it's a regular file, not a symlink that was swapped in
+ fileInfo, err := os.Lstat(helperPath)
+ if err != nil || fileInfo.Mode()&os.ModeSymlink != 0 {
+ logger.Warn("Security: helper script may have been tampered with, refusing to execute")
+ os.Remove(helperPath)
+ os.Exit(0)
+ }
+
+ // Execute the helper script in background (detached from current process)
+ // SECURITY: Avoid shell interpretation by executing directly with nohup
+ cmd := exec.Command("nohup", helperPath)
+ cmd.Stdout = nil
+ cmd.Stderr = nil
+ // Detach from parent process group to ensure script continues after we exit
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ Pgid: 0,
+ }
+ if err := cmd.Start(); err != nil {
+ logger.WithError(err).Warn("Failed to start restart helper script, will exit and rely on OpenRC auto-restart")
+ // Clean up script
+ if removeErr := os.Remove(helperPath); removeErr != nil {
+ logger.WithError(removeErr).Debug("Failed to remove helper script")
+ }
+ // Fall through to exit approach
+ } else {
+ logger.Info("Scheduled service restart via helper script, exiting now...")
+ // Give the helper script a moment to start
+ time.Sleep(500 * time.Millisecond)
+ // Exit gracefully - the helper script will restart the service
+ os.Exit(0)
+ }
}
}
@@ -549,3 +1467,277 @@ rm -f "$0"
return nil
}
}
+
+// runComplianceScan runs an on-demand compliance scan and sends results to server (backwards compatible)
+func runComplianceScan(profileType string) error {
+ return runComplianceScanWithOptions(&models.ComplianceScanOptions{
+ ProfileID: profileType,
+ })
+}
+
+// sendComplianceProgress sends a progress update via the global channel
+func sendComplianceProgress(phase, profileName, message string, progress float64, errMsg string) {
+ select {
+ case complianceProgressChan <- ComplianceScanProgress{
+ Phase: phase,
+ ProfileName: profileName,
+ Message: message,
+ Progress: progress,
+ Error: errMsg,
+ }:
+ // Successfully sent
+ default:
+ // Channel full or no listener, skip to avoid blocking
+ logger.Debug("Compliance progress channel full, skipping update")
+ }
+}
+
+// runComplianceScanWithOptions runs an on-demand compliance scan with options and sends results to server
+func runComplianceScanWithOptions(options *models.ComplianceScanOptions) error {
+ profileName := options.ProfileID
+ if profileName == "" {
+ profileName = "default"
+ }
+
+ logger.WithFields(map[string]interface{}{
+ "profile_id": options.ProfileID,
+ "enable_remediation": options.EnableRemediation,
+ }).Info("Starting on-demand compliance scan")
+
+ // Send progress: started
+ sendComplianceProgress("started", profileName, "Initializing compliance scan...", 5, "")
+
+ // Create compliance integration
+ complianceInteg := compliance.New(logger)
+ // Set Docker integration status - Docker Bench only runs if Docker integration is enabled
+ complianceInteg.SetDockerIntegrationEnabled(cfgManager.IsIntegrationEnabled("docker"))
+
+ if !complianceInteg.IsAvailable() {
+ sendComplianceProgress("failed", profileName, "Compliance scanning not available", 0, "compliance scanning not available on this system")
+ return fmt.Errorf("compliance scanning not available on this system")
+ }
+
+ // Send progress: evaluating
+ sendComplianceProgress("evaluating", profileName, "Running OpenSCAP evaluation (this may take several minutes)...", 15, "")
+
+ // Run the scan with options (25 minutes to allow for complex systems)
+ // OpenSCAP CIS Level 1 Server can take 15+ minutes on systems with many packages
+ // Docker Bench needs additional time after OpenSCAP completes
+ ctx, cancel := context.WithTimeout(context.Background(), 25*time.Minute)
+ defer cancel()
+
+ integrationData, err := complianceInteg.CollectWithOptions(ctx, options)
+ if err != nil {
+ sendComplianceProgress("failed", profileName, "Scan failed", 0, err.Error())
+ return fmt.Errorf("compliance scan failed: %w", err)
+ }
+
+ // Send progress: parsing
+ sendComplianceProgress("parsing", profileName, "Processing scan results...", 80, "")
+
+ // Extract compliance data
+ complianceData, ok := integrationData.Data.(*models.ComplianceData)
+ if !ok {
+ sendComplianceProgress("failed", profileName, "Failed to extract compliance data", 0, "failed to extract compliance data")
+ return fmt.Errorf("failed to extract compliance data")
+ }
+
+ if len(complianceData.Scans) == 0 {
+ logger.Info("No compliance scans to send")
+ sendComplianceProgress("completed", profileName, "Scan completed (no results)", 100, "")
+ return nil
+ }
+
+ // Send progress: sending
+ sendComplianceProgress("sending", profileName, "Uploading results to server...", 90, "")
+
+ // Get system info
+ systemDetector := system.New(logger)
+ hostname, _ := systemDetector.GetHostname()
+ machineID := systemDetector.GetMachineID()
+
+ // Create payload
+ payload := &models.CompliancePayload{
+ ComplianceData: *complianceData,
+ Hostname: hostname,
+ MachineID: machineID,
+ AgentVersion: version.Version,
+ }
+
+ // Debug: log what we're about to send
+ for i, scan := range payload.Scans {
+ statusCounts := map[string]int{}
+ for _, r := range scan.Results {
+ statusCounts[r.Status]++
+ }
+ logger.WithFields(map[string]interface{}{
+ "scan_index": i,
+ "profile_name": scan.ProfileName,
+ "profile_type": scan.ProfileType,
+ "total_results": len(scan.Results),
+ "result_statuses": statusCounts,
+ "scan_passed": scan.Passed,
+ "scan_failed": scan.Failed,
+ "scan_warnings": scan.Warnings,
+ "scan_skipped": scan.Skipped,
+ }).Info("DEBUG: Compliance payload scan details before sending")
+ }
+
+ // Send to server
+ httpClient := client.New(cfgManager, logger)
+ sendCtx, sendCancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer sendCancel()
+
+ response, err := httpClient.SendComplianceData(sendCtx, payload)
+ if err != nil {
+ sendComplianceProgress("failed", profileName, "Failed to send results", 0, err.Error())
+ return fmt.Errorf("failed to send compliance data: %w", err)
+ }
+
+ // Send progress: completed with score
+ score := float64(0)
+ if len(complianceData.Scans) > 0 {
+ score = complianceData.Scans[0].Score
+ }
+ completedMsg := fmt.Sprintf("Scan completed! Score: %.1f%%", score)
+ sendComplianceProgress("completed", profileName, completedMsg, 100, "")
+
+ logFields := map[string]interface{}{
+ "scans_received": response.ScansReceived,
+ "message": response.Message,
+ }
+ if options.EnableRemediation {
+ logFields["remediation_enabled"] = true
+ }
+ logger.WithFields(logFields).Info("On-demand compliance scan results sent to server")
+
+ return nil
+}
+
+// runDockerImageScan runs a CVE scan on Docker images using oscap-docker
+func runDockerImageScan(imageName, containerName string, scanAllImages bool) error {
+ logger.WithFields(map[string]interface{}{
+ "image_name": imageName,
+ "container_name": containerName,
+ "scan_all_images": scanAllImages,
+ }).Info("Starting Docker image CVE scan")
+
+ // Check if Docker integration is enabled
+ if !cfgManager.IsIntegrationEnabled("docker") {
+ return fmt.Errorf("docker integration is not enabled")
+ }
+
+ // Check if compliance integration is enabled (required for oscap-docker)
+ if !cfgManager.IsIntegrationEnabled("compliance") {
+ return fmt.Errorf("compliance integration is not enabled (required for oscap-docker)")
+ }
+
+ // Create oscap-docker scanner
+ oscapDockerScanner := compliance.NewOscapDockerScanner(logger)
+ if !oscapDockerScanner.IsAvailable() {
+ sendComplianceProgress("failed", "Docker Image CVE Scan", "oscap-docker not available", 0, "oscap-docker is not installed or Docker is not running")
+ return fmt.Errorf("oscap-docker is not available")
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
+ defer cancel()
+
+ var scans []*models.ComplianceScan
+
+ if scanAllImages {
+ // Scan all Docker images
+ sendComplianceProgress("started", "Docker Image CVE Scan", "Scanning all Docker images for CVEs...", 5, "")
+
+ results, err := oscapDockerScanner.ScanAllImages(ctx)
+ if err != nil {
+ sendComplianceProgress("failed", "Docker Image CVE Scan", "Failed to scan images", 0, err.Error())
+ return fmt.Errorf("failed to scan all images: %w", err)
+ }
+ scans = results
+ } else if imageName != "" {
+ // Scan specific image
+ sendComplianceProgress("started", "Docker Image CVE Scan", fmt.Sprintf("Scanning image %s for CVEs...", imageName), 5, "")
+
+ scan, err := oscapDockerScanner.ScanImage(ctx, imageName)
+ if err != nil {
+ sendComplianceProgress("failed", "Docker Image CVE Scan", "Failed to scan image", 0, err.Error())
+ return fmt.Errorf("failed to scan image %s: %w", imageName, err)
+ }
+ scans = append(scans, scan)
+ } else if containerName != "" {
+ // Scan specific container
+ sendComplianceProgress("started", "Docker Image CVE Scan", fmt.Sprintf("Scanning container %s for CVEs...", containerName), 5, "")
+
+ scan, err := oscapDockerScanner.ScanContainer(ctx, containerName)
+ if err != nil {
+ sendComplianceProgress("failed", "Docker Image CVE Scan", "Failed to scan container", 0, err.Error())
+ return fmt.Errorf("failed to scan container %s: %w", containerName, err)
+ }
+ scans = append(scans, scan)
+ } else {
+ return fmt.Errorf("no image or container specified for scan")
+ }
+
+ if len(scans) == 0 {
+ sendComplianceProgress("completed", "Docker Image CVE Scan", "No images to scan", 100, "")
+ logger.Info("No Docker images to scan")
+ return nil
+ }
+
+ // Send progress: parsing
+ sendComplianceProgress("parsing", "Docker Image CVE Scan", "Processing scan results...", 80, "")
+
+ // Convert pointer slice to value slice for ComplianceData
+ scanValues := make([]models.ComplianceScan, len(scans))
+ for i, scan := range scans {
+ scanValues[i] = *scan
+ }
+
+ // Create compliance data structure
+ complianceData := &models.ComplianceData{
+ Scans: scanValues,
+ }
+
+ // Send progress: sending
+ sendComplianceProgress("sending", "Docker Image CVE Scan", "Uploading results to server...", 90, "")
+
+ // Get system info
+ systemDetector := system.New(logger)
+ hostname, _ := systemDetector.GetHostname()
+ machineID := systemDetector.GetMachineID()
+
+ // Create payload
+ payload := &models.CompliancePayload{
+ ComplianceData: *complianceData,
+ Hostname: hostname,
+ MachineID: machineID,
+ AgentVersion: version.Version,
+ }
+
+ // Send to server
+ httpClient := client.New(cfgManager, logger)
+ sendCtx, sendCancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer sendCancel()
+
+ response, err := httpClient.SendComplianceData(sendCtx, payload)
+ if err != nil {
+ sendComplianceProgress("failed", "Docker Image CVE Scan", "Failed to send results", 0, err.Error())
+ return fmt.Errorf("failed to send Docker image scan data: %w", err)
+ }
+
+ // Send progress: completed
+ totalCVEs := 0
+ for _, scan := range scans {
+ totalCVEs += scan.Failed
+ }
+ completedMsg := fmt.Sprintf("Scan completed! Found %d CVEs across %d images", totalCVEs, len(scans))
+ sendComplianceProgress("completed", "Docker Image CVE Scan", completedMsg, 100, "")
+
+ logger.WithFields(map[string]interface{}{
+ "scans_received": response.ScansReceived,
+ "images_scanned": len(scans),
+ "cves_found": totalCVEs,
+ }).Info("Docker image CVE scan results sent to server")
+
+ return nil
+}
diff --git a/cmd/patchmon-agent/commands/version_update.go b/cmd/patchmon-agent/commands/version_update.go
index 9eb59c4..77a25c7 100644
--- a/cmd/patchmon-agent/commands/version_update.go
+++ b/cmd/patchmon-agent/commands/version_update.go
@@ -2,8 +2,10 @@ package commands
import (
"context"
+ "crypto/rand"
"crypto/sha256"
"crypto/tls"
+ "encoding/hex"
"encoding/json"
"fmt"
"io"
@@ -13,9 +15,11 @@ import (
"path/filepath"
"runtime"
"strings"
+ "syscall"
"time"
"patchmon-agent/internal/config"
+ "patchmon-agent/internal/utils"
"patchmon-agent/internal/version"
"github.com/spf13/cobra"
@@ -43,6 +47,7 @@ type ServerVersionInfo struct {
AutoUpdateDisabledReason string `json:"autoUpdateDisabledReason"`
LastChecked string `json:"lastChecked"`
SupportedArchitectures []string `json:"supportedArchitectures"`
+ Hash string `json:"hash"` // SHA256 hash for integrity verification
}
// checkVersionCmd represents the check-version command
@@ -164,6 +169,24 @@ func updateAgent() error {
return fmt.Errorf("no binary data received from server")
}
+ // SECURITY: Verify binary integrity against server-provided hash
+ // This prevents supply chain attacks where binary could be tampered during download
+ // SECURITY: Hash verification is MANDATORY for binary integrity
+ if versionInfo == nil || versionInfo.Hash == "" {
+ logger.Error("Server did not provide hash for binary verification - refusing to update")
+ return fmt.Errorf("binary hash not provided by server - refusing to update without integrity verification (update your PatchMon server)")
+ }
+
+ actualHash := fmt.Sprintf("%x", sha256.Sum256(newAgentData))
+ if actualHash != versionInfo.Hash {
+ logger.WithFields(map[string]interface{}{
+ "expected": versionInfo.Hash,
+ "actual": actualHash,
+ }).Error("Binary hash verification failed - possible tampering detected")
+ return fmt.Errorf("binary hash mismatch: expected %s, got %s", versionInfo.Hash, actualHash)
+ }
+ logger.WithField("hash", actualHash).Info("Binary integrity verified successfully")
+
// Get the new version from server version info (more reliable than parsing binary output)
newVersion := currentVersion // Default to current if we can't determine
if versionInfo != nil && versionInfo.LatestVersion != "" {
@@ -311,8 +334,20 @@ func getServerVersionInfo() (*ServerVersionInfo, error) {
},
}
- // Configure for insecure SSL if needed
+ // SECURITY: Configure for insecure SSL if needed (NOT RECOMMENDED)
+ // Even with hash verification, TLS provides important protections
if cfg.SkipSSLVerify {
+ // SECURITY: Block skip_ssl_verify in production environments
+ if utils.IsProductionEnvironment() {
+ logger.Error("╔══════════════════════════════════════════════════════════════════╗")
+ logger.Error("║ SECURITY ERROR: skip_ssl_verify is BLOCKED in production! ║")
+ logger.Error("║ Set PATCHMON_ENV to 'development' to enable insecure mode. ║")
+ logger.Error("║ This setting cannot be used when PATCHMON_ENV=production ║")
+ logger.Error("╚══════════════════════════════════════════════════════════════════╝")
+ return nil, fmt.Errorf("skip_ssl_verify is blocked in production environment")
+ }
+
+ logger.Warn("⚠️ TLS verification disabled for version check - NOT RECOMMENDED")
httpClient.Transport = &http.Transport{
ResponseHeaderTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{
@@ -372,10 +407,29 @@ func getLatestBinaryFromServer() (*ServerVersionResponse, error) {
req.Header.Set("X-API-ID", credentials.APIID)
req.Header.Set("X-API-KEY", credentials.APIKey)
- // Configure HTTP client for insecure SSL if needed
+ // SECURITY: Configure HTTP client for insecure SSL if needed
+ // WARNING: This is dangerous for binary downloads even with hash verification!
+ // An attacker could provide both a malicious binary AND a matching hash.
+ // TLS ensures we're talking to the legitimate server.
httpClient := http.DefaultClient
if cfg.SkipSSLVerify {
- logger.Warn("⚠️ SSL certificate verification is disabled for binary download")
+ // SECURITY: Block skip_ssl_verify in production environments
+ if utils.IsProductionEnvironment() {
+ logger.Error("╔══════════════════════════════════════════════════════════════════╗")
+ logger.Error("║ SECURITY ERROR: skip_ssl_verify is BLOCKED in production! ║")
+ logger.Error("║ Set PATCHMON_ENV to 'development' to enable insecure mode. ║")
+ logger.Error("║ This setting cannot be used when PATCHMON_ENV=production ║")
+ logger.Error("╚══════════════════════════════════════════════════════════════════╝")
+ return nil, fmt.Errorf("skip_ssl_verify is blocked in production environment")
+ }
+
+ logger.Error("╔══════════════════════════════════════════════════════════════════╗")
+ logger.Error("║ CRITICAL: TLS verification DISABLED for binary download! ║")
+ logger.Error("║ This is a severe security risk - MITM attacks are possible. ║")
+ logger.Error("║ Hash verification provides some protection, but TLS ensures ║")
+ logger.Error("║ you're communicating with the legitimate server. ║")
+ logger.Error("║ Use a valid TLS certificate in production! ║")
+ logger.Error("╚══════════════════════════════════════════════════════════════════╝")
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
@@ -399,12 +453,22 @@ func getLatestBinaryFromServer() (*ServerVersionResponse, error) {
return nil, fmt.Errorf("server returned status %d", resp.StatusCode)
}
- // Read the binary data
- binaryData, err := io.ReadAll(resp.Body)
+ // SECURITY: Limit binary download size to prevent DoS attacks
+ // Max 100MB should be more than enough for the agent binary
+ const maxBinarySize = 100 * 1024 * 1024
+ limitedReader := io.LimitReader(resp.Body, maxBinarySize+1)
+
+ // Read the binary data with size limit
+ binaryData, err := io.ReadAll(limitedReader)
if err != nil {
return nil, fmt.Errorf("failed to read binary data: %w", err)
}
+ // Check if we hit the size limit (read more than maxBinarySize)
+ if int64(len(binaryData)) > maxBinarySize {
+ return nil, fmt.Errorf("binary size exceeds maximum allowed (%d MB)", maxBinarySize/(1024*1024))
+ }
+
// Calculate hash
hash := fmt.Sprintf("%x", sha256.Sum256(binaryData))
@@ -430,7 +494,8 @@ func copyFile(src, dst string) error {
return err
}
- return os.WriteFile(dst, data, 0755)
+ // SECURITY: Use 0700 for backup files (owner-only access)
+ return os.WriteFile(dst, data, 0700)
}
// cleanupOldBackups removes old backup files, keeping only the last 3
@@ -523,8 +588,8 @@ func checkRecentUpdate() error {
func markRecentUpdate() {
updateMarkerPath := "/etc/patchmon/.last_update_timestamp"
- // Ensure directory exists
- if err := os.MkdirAll("/etc/patchmon", 0755); err != nil {
+ // SECURITY: Ensure directory exists with restrictive permissions
+ if err := os.MkdirAll("/etc/patchmon", 0700); err != nil {
logger.WithError(err).Debug("Could not create /etc/patchmon directory (non-critical)")
return
}
@@ -558,12 +623,19 @@ func restartService(executablePath, expectedVersion string) error {
// Instead, we'll create a helper script that runs after we exit
logger.Debug("Detected systemd, scheduling service restart via helper script")
- // Ensure /etc/patchmon directory exists
- if err := os.MkdirAll("/etc/patchmon", 0755); err != nil {
+ // SECURITY: Ensure /etc/patchmon directory exists with restrictive permissions
+ // Using 0700 to prevent other users from reading/writing to this directory
+ if err := os.MkdirAll("/etc/patchmon", 0700); err != nil {
logger.WithError(err).Warn("Failed to create /etc/patchmon directory, will try anyway")
}
// Create a helper script that will restart the service after we exit
+ // SECURITY: TOCTOU mitigation measures:
+ // 1) Use random suffix to prevent predictable paths
+ // 2) Use O_EXCL flag for atomic creation (fail if file exists)
+ // 3) 0700 permissions on dir and file (owner-only)
+ // 4) Script is deleted immediately after execution
+ // 5) Verify no symlink attacks before execution
helperScript := `#!/bin/sh
# Wait a moment for the current process to exit
sleep 2
@@ -572,27 +644,69 @@ systemctl restart patchmon-agent 2>&1 || systemctl start patchmon-agent 2>&1
# Clean up this script
rm -f "$0"
`
- helperPath := "/etc/patchmon/patchmon-restart-helper.sh"
- if err := os.WriteFile(helperPath, []byte(helperScript), 0755); err != nil {
+ // Generate random suffix to prevent predictable path attacks
+ randomBytes := make([]byte, 8)
+ if _, err := rand.Read(randomBytes); err != nil {
+ logger.WithError(err).Warn("Failed to generate random suffix, using fallback")
+ randomBytes = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
+ }
+ helperPath := filepath.Join("/etc/patchmon", fmt.Sprintf("restart-%s.sh", hex.EncodeToString(randomBytes)))
+
+ // SECURITY: Verify the directory is not a symlink (prevent symlink attacks)
+ dirInfo, err := os.Lstat("/etc/patchmon")
+ if err == nil && dirInfo.Mode()&os.ModeSymlink != 0 {
+ logger.Warn("Security: /etc/patchmon is a symlink, refusing to create helper script")
+ os.Exit(0) // Fall through to exit approach
+ }
+
+ // SECURITY: Use O_EXCL to atomically create file (fail if exists - prevents race conditions)
+ file, err := os.OpenFile(helperPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0700)
+ if err != nil {
logger.WithError(err).Warn("Failed to create restart helper script, will exit and rely on systemd auto-restart")
// Fall through to exit approach
} else {
- // Execute the helper script in background (detached from current process)
- // Use 'sh -c' with nohup to ensure it runs after we exit
- cmd := exec.Command("sh", "-c", fmt.Sprintf("nohup %s > /dev/null 2>&1 &", helperPath))
- if err := cmd.Start(); err != nil {
- logger.WithError(err).Warn("Failed to start restart helper script, will exit and rely on systemd auto-restart")
- // Clean up script
- if removeErr := os.Remove(helperPath); removeErr != nil {
- logger.WithError(removeErr).Debug("Failed to remove helper script")
- }
+ // Write the script content to the file
+ if _, err := file.WriteString(helperScript); err != nil {
+ logger.WithError(err).Warn("Failed to write restart helper script")
+ file.Close()
+ os.Remove(helperPath)
// Fall through to exit approach
} else {
- logger.Info("Scheduled service restart via helper script, exiting now...")
- // Give the helper script a moment to start
- time.Sleep(500 * time.Millisecond)
- // Exit gracefully - the helper script will restart the service
- os.Exit(0)
+ file.Close()
+
+ // SECURITY: Verify the file we're about to execute is the one we created
+ // Check it's a regular file, not a symlink that was swapped in
+ fileInfo, err := os.Lstat(helperPath)
+ if err != nil || fileInfo.Mode()&os.ModeSymlink != 0 {
+ logger.Warn("Security: helper script may have been tampered with, refusing to execute")
+ os.Remove(helperPath)
+ os.Exit(0)
+ }
+
+ // Execute the helper script in background (detached from current process)
+ // SECURITY: Avoid shell interpretation by executing directly with nohup
+ cmd := exec.Command("nohup", helperPath)
+ cmd.Stdout = nil
+ cmd.Stderr = nil
+ // Detach from parent process group to ensure script continues after we exit
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ Pgid: 0,
+ }
+ if err := cmd.Start(); err != nil {
+ logger.WithError(err).Warn("Failed to start restart helper script, will exit and rely on systemd auto-restart")
+ // Clean up script
+ if removeErr := os.Remove(helperPath); removeErr != nil {
+ logger.WithError(removeErr).Debug("Failed to remove helper script")
+ }
+ // Fall through to exit approach
+ } else {
+ logger.Info("Scheduled service restart via helper script, exiting now...")
+ // Give the helper script a moment to start
+ time.Sleep(500 * time.Millisecond)
+ // Exit gracefully - the helper script will restart the service
+ os.Exit(0)
+ }
}
}
@@ -608,12 +722,19 @@ rm -f "$0"
// Instead, we'll create a helper script that runs after we exit
logger.Debug("Detected OpenRC, scheduling service restart via helper script")
- // Ensure /etc/patchmon directory exists
- if err := os.MkdirAll("/etc/patchmon", 0755); err != nil {
+ // SECURITY: Ensure /etc/patchmon directory exists with restrictive permissions
+ // Using 0700 to prevent other users from reading/writing to this directory
+ if err := os.MkdirAll("/etc/patchmon", 0700); err != nil {
logger.WithError(err).Warn("Failed to create /etc/patchmon directory, will try anyway")
}
// Create a helper script that will restart the service after we exit
+ // SECURITY: TOCTOU mitigation measures:
+ // 1) Use random suffix to prevent predictable paths
+ // 2) Use O_EXCL flag for atomic creation (fail if file exists)
+ // 3) 0700 permissions on dir and file (owner-only)
+ // 4) Script is deleted immediately after execution
+ // 5) Verify no symlink attacks before execution
helperScript := `#!/bin/sh
# Wait a moment for the current process to exit
sleep 2
@@ -622,27 +743,69 @@ rc-service patchmon-agent restart 2>&1 || rc-service patchmon-agent start 2>&1
# Clean up this script
rm -f "$0"
`
- helperPath := "/etc/patchmon/patchmon-restart-helper.sh"
- if err := os.WriteFile(helperPath, []byte(helperScript), 0755); err != nil {
+ // Generate random suffix to prevent predictable path attacks
+ randomBytes := make([]byte, 8)
+ if _, err := rand.Read(randomBytes); err != nil {
+ logger.WithError(err).Warn("Failed to generate random suffix, using fallback")
+ randomBytes = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
+ }
+ helperPath := filepath.Join("/etc/patchmon", fmt.Sprintf("restart-%s.sh", hex.EncodeToString(randomBytes)))
+
+ // SECURITY: Verify the directory is not a symlink (prevent symlink attacks)
+ dirInfo, err := os.Lstat("/etc/patchmon")
+ if err == nil && dirInfo.Mode()&os.ModeSymlink != 0 {
+ logger.Warn("Security: /etc/patchmon is a symlink, refusing to create helper script")
+ os.Exit(0) // Fall through to exit approach
+ }
+
+ // SECURITY: Use O_EXCL to atomically create file (fail if exists - prevents race conditions)
+ file, err := os.OpenFile(helperPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0700)
+ if err != nil {
logger.WithError(err).Warn("Failed to create restart helper script, will exit and rely on OpenRC auto-restart")
// Fall through to exit approach
} else {
- // Execute the helper script in background (detached from current process)
- // Use 'sh -c' with nohup to ensure it runs after we exit
- cmd := exec.Command("sh", "-c", fmt.Sprintf("nohup %s > /dev/null 2>&1 &", helperPath))
- if err := cmd.Start(); err != nil {
- logger.WithError(err).Warn("Failed to start restart helper script, will exit and rely on OpenRC auto-restart")
- // Clean up script
- if removeErr := os.Remove(helperPath); removeErr != nil {
- logger.WithError(removeErr).Debug("Failed to remove helper script")
- }
+ // Write the script content to the file
+ if _, err := file.WriteString(helperScript); err != nil {
+ logger.WithError(err).Warn("Failed to write restart helper script")
+ file.Close()
+ os.Remove(helperPath)
// Fall through to exit approach
} else {
- logger.Info("Scheduled service restart via helper script, exiting now...")
- // Give the helper script a moment to start
- time.Sleep(500 * time.Millisecond)
- // Exit gracefully - the helper script will restart the service
- os.Exit(0)
+ file.Close()
+
+ // SECURITY: Verify the file we're about to execute is the one we created
+ // Check it's a regular file, not a symlink that was swapped in
+ fileInfo, err := os.Lstat(helperPath)
+ if err != nil || fileInfo.Mode()&os.ModeSymlink != 0 {
+ logger.Warn("Security: helper script may have been tampered with, refusing to execute")
+ os.Remove(helperPath)
+ os.Exit(0)
+ }
+
+ // Execute the helper script in background (detached from current process)
+ // SECURITY: Avoid shell interpretation by executing directly with nohup
+ cmd := exec.Command("nohup", helperPath)
+ cmd.Stdout = nil
+ cmd.Stderr = nil
+ // Detach from parent process group to ensure script continues after we exit
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ Pgid: 0,
+ }
+ if err := cmd.Start(); err != nil {
+ logger.WithError(err).Warn("Failed to start restart helper script, will exit and rely on OpenRC auto-restart")
+ // Clean up script
+ if removeErr := os.Remove(helperPath); removeErr != nil {
+ logger.WithError(removeErr).Debug("Failed to remove helper script")
+ }
+ // Fall through to exit approach
+ } else {
+ logger.Info("Scheduled service restart via helper script, exiting now...")
+ // Give the helper script a moment to start
+ time.Sleep(500 * time.Millisecond)
+ // Exit gracefully - the helper script will restart the service
+ os.Exit(0)
+ }
}
}
diff --git a/dist/patchmon-agent-linux-amd64 b/dist/patchmon-agent-linux-amd64
new file mode 100755
index 0000000..3bd40fd
Binary files /dev/null and b/dist/patchmon-agent-linux-amd64 differ
diff --git a/dist/patchmon-agent-linux-arm64 b/dist/patchmon-agent-linux-arm64
new file mode 100755
index 0000000..a7ef37b
Binary files /dev/null and b/dist/patchmon-agent-linux-arm64 differ
diff --git a/dist/patchmonenhanced-agent-linux-386 b/dist/patchmonenhanced-agent-linux-386
new file mode 100755
index 0000000..0b3c0d4
Binary files /dev/null and b/dist/patchmonenhanced-agent-linux-386 differ
diff --git a/dist/patchmonenhanced-agent-linux-amd64 b/dist/patchmonenhanced-agent-linux-amd64
new file mode 100755
index 0000000..b060eb5
Binary files /dev/null and b/dist/patchmonenhanced-agent-linux-amd64 differ
diff --git a/dist/patchmonenhanced-agent-linux-arm b/dist/patchmonenhanced-agent-linux-arm
new file mode 100755
index 0000000..4ca87be
Binary files /dev/null and b/dist/patchmonenhanced-agent-linux-arm differ
diff --git a/dist/patchmonenhanced-agent-linux-arm64 b/dist/patchmonenhanced-agent-linux-arm64
new file mode 100755
index 0000000..fe1096d
Binary files /dev/null and b/dist/patchmonenhanced-agent-linux-arm64 differ
diff --git a/go.mod b/go.mod
index 390fd8e..4e5a1ab 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module patchmon-agent
-go 1.25
+go 1.24.0
require (
github.com/docker/docker v28.5.1+incompatible
diff --git a/internal/client/client.go b/internal/client/client.go
index ee7ed8a..0e8968f 100644
--- a/internal/client/client.go
+++ b/internal/client/client.go
@@ -7,6 +7,7 @@ import (
"time"
"patchmon-agent/internal/config"
+ "patchmon-agent/internal/utils"
"patchmon-agent/pkg/models"
"github.com/go-resty/resty/v2"
@@ -21,6 +22,16 @@ type Client struct {
logger *logrus.Logger
}
// truncateResponse truncates a response string to prevent leaking sensitive data in logs.
// SECURITY: Error messages should not include full response bodies which may contain
// sensitive information like tokens, internal paths, or system details.
//
// The cut point is moved back to a UTF-8 rune boundary so the truncated
// string is never left with a dangling partial character, and a negative
// maxLen is clamped to 0 instead of panicking on the slice expression.
func truncateResponse(s string, maxLen int) string {
	if maxLen < 0 {
		maxLen = 0
	}
	if len(s) <= maxLen {
		return s
	}
	// Back up while the byte at the cut is a UTF-8 continuation byte
	// (bit pattern 10xxxxxx), so we never slice through a multi-byte rune.
	cut := maxLen
	for cut > 0 && s[cut]&0xC0 == 0x80 {
		cut--
	}
	return s[:cut] + "... (truncated)"
}
+
// New creates a new HTTP client
func New(configMgr *config.Manager, logger *logrus.Logger) *Client {
client := resty.New()
@@ -32,9 +43,25 @@ func New(configMgr *config.Manager, logger *logrus.Logger) *Client {
client.SetLogger(logger)
// Configure TLS based on skip_ssl_verify setting
+ // SECURITY WARNING: Disabling TLS verification exposes the agent to MITM attacks
cfg := configMgr.GetConfig()
if cfg.SkipSSLVerify {
- logger.Warn("⚠️ SSL certificate verification is disabled (skip_ssl_verify=true)")
+ // SECURITY: Block skip_ssl_verify in production environments
+ if utils.IsProductionEnvironment() {
+ logger.Error("╔══════════════════════════════════════════════════════════════════╗")
+ logger.Error("║ SECURITY ERROR: skip_ssl_verify is BLOCKED in production! ║")
+ logger.Error("║ Set PATCHMON_ENV to 'development' to enable insecure mode. ║")
+ logger.Error("║ This setting cannot be used when PATCHMON_ENV=production ║")
+ logger.Error("╚══════════════════════════════════════════════════════════════════╝")
+ logger.Fatal("Refusing to start with skip_ssl_verify=true in production environment")
+ }
+
+ logger.Error("╔══════════════════════════════════════════════════════════════════╗")
+ logger.Error("║ SECURITY WARNING: TLS certificate verification is DISABLED! ║")
+ logger.Error("║ This exposes the agent to man-in-the-middle attacks. ║")
+ logger.Error("║ An attacker could intercept and modify communications. ║")
+ logger.Error("║ Do NOT use skip_ssl_verify=true in production environments! ║")
+ logger.Error("╚══════════════════════════════════════════════════════════════════╝")
client.SetTLSClientConfig(&tls.Config{
InsecureSkipVerify: true,
})
@@ -70,7 +97,8 @@ func (c *Client) Ping(ctx context.Context) (*models.PingResponse, error) {
}
if resp.StatusCode() != 200 {
- return nil, fmt.Errorf("ping request failed with status %d: %s", resp.StatusCode(), resp.String())
+ c.logger.WithField("response", resp.String()).Debug("Full error response from ping request")
+ return nil, fmt.Errorf("ping request failed with status %d: %s", resp.StatusCode(), truncateResponse(resp.String(), 200))
}
result, ok := resp.Result().(*models.PingResponse)
@@ -104,7 +132,8 @@ func (c *Client) SendUpdate(ctx context.Context, payload *models.ReportPayload)
}
if resp.StatusCode() != 200 {
- return nil, fmt.Errorf("update request failed with status %d: %s", resp.StatusCode(), resp.String())
+ c.logger.WithField("response", resp.String()).Debug("Full error response from update request")
+ return nil, fmt.Errorf("update request failed with status %d: %s", resp.StatusCode(), truncateResponse(resp.String(), 200))
}
result, ok := resp.Result().(*models.UpdateResponse)
@@ -134,7 +163,8 @@ func (c *Client) GetUpdateInterval(ctx context.Context) (*models.UpdateIntervalR
}
if resp.StatusCode() != 200 {
- return nil, fmt.Errorf("update interval request failed with status %d: %s", resp.StatusCode(), resp.String())
+ c.logger.WithField("response", resp.String()).Debug("Full error response from update interval request")
+ return nil, fmt.Errorf("update interval request failed with status %d: %s", resp.StatusCode(), truncateResponse(resp.String(), 200))
}
result, ok := resp.Result().(*models.UpdateIntervalResponse)
@@ -168,7 +198,8 @@ func (c *Client) SendDockerData(ctx context.Context, payload *models.DockerPaylo
}
if resp.StatusCode() != 200 {
- return nil, fmt.Errorf("docker data request failed with status %d: %s", resp.StatusCode(), resp.String())
+ c.logger.WithField("response", resp.String()).Debug("Full error response from docker data request")
+ return nil, fmt.Errorf("docker data request failed with status %d: %s", resp.StatusCode(), truncateResponse(resp.String(), 200))
}
result, ok := resp.Result().(*models.DockerResponse)
@@ -198,7 +229,8 @@ func (c *Client) GetIntegrationStatus(ctx context.Context) (*models.IntegrationS
}
if resp.StatusCode() != 200 {
- return nil, fmt.Errorf("integration status request failed with status %d: %s", resp.StatusCode(), resp.String())
+ c.logger.WithField("response", resp.String()).Debug("Full error response from integration status request")
+ return nil, fmt.Errorf("integration status request failed with status %d: %s", resp.StatusCode(), truncateResponse(resp.String(), 200))
}
result, ok := resp.Result().(*models.IntegrationStatusResponse)
@@ -209,6 +241,36 @@ func (c *Client) GetIntegrationStatus(ctx context.Context) (*models.IntegrationS
return result, nil
}
+// SendIntegrationSetupStatus sends the setup status of an integration to the server
+func (c *Client) SendIntegrationSetupStatus(ctx context.Context, status *models.IntegrationSetupStatus) error {
+ url := fmt.Sprintf("%s/api/%s/hosts/integration-status", c.config.PatchmonServer, c.config.APIVersion)
+
+ c.logger.WithFields(logrus.Fields{
+ "integration": status.Integration,
+ "enabled": status.Enabled,
+ "status": status.Status,
+ }).Info("Sending integration setup status to server")
+
+ resp, err := c.client.R().
+ SetContext(ctx).
+ SetHeader("Content-Type", "application/json").
+ SetHeader("X-API-ID", c.credentials.APIID).
+ SetHeader("X-API-KEY", c.credentials.APIKey).
+ SetBody(status).
+ Post(url)
+
+ if err != nil {
+ return fmt.Errorf("integration setup status request failed: %w", err)
+ }
+
+ if resp.StatusCode() != 200 {
+ return fmt.Errorf("integration setup status request failed with status %d", resp.StatusCode())
+ }
+
+ c.logger.Info("Integration setup status sent successfully")
+ return nil
+}
+
// SendDockerStatusEvent sends a real-time Docker container status event via WebSocket
func (c *Client) SendDockerStatusEvent(event *models.DockerStatusEvent) error {
// This will be called by the WebSocket connection in the serve command
@@ -221,3 +283,39 @@ func (c *Client) SendDockerStatusEvent(event *models.DockerStatusEvent) error {
}).Debug("Docker status event")
return nil
}
+
+// SendComplianceData sends compliance scan data to the server
+func (c *Client) SendComplianceData(ctx context.Context, payload *models.CompliancePayload) (*models.ComplianceResponse, error) {
+ url := fmt.Sprintf("%s/api/%s/compliance/scans", c.config.PatchmonServer, c.config.APIVersion)
+
+ c.logger.WithFields(logrus.Fields{
+ "url": url,
+ "method": "POST",
+ "scans": len(payload.Scans),
+ }).Debug("Sending compliance data to server")
+
+ resp, err := c.client.R().
+ SetContext(ctx).
+ SetHeader("Content-Type", "application/json").
+ SetHeader("X-API-ID", c.credentials.APIID).
+ SetHeader("X-API-KEY", c.credentials.APIKey).
+ SetBody(payload).
+ SetResult(&models.ComplianceResponse{}).
+ Post(url)
+
+ if err != nil {
+ return nil, fmt.Errorf("compliance data request failed: %w", err)
+ }
+
+ if resp.StatusCode() != 200 {
+ c.logger.WithField("response", resp.String()).Debug("Full error response from compliance data request")
+ return nil, fmt.Errorf("compliance data request failed with status %d: %s", resp.StatusCode(), truncateResponse(resp.String(), 200))
+ }
+
+ result, ok := resp.Result().(*models.ComplianceResponse)
+ if !ok {
+ return nil, fmt.Errorf("invalid response format")
+ }
+
+ return result, nil
+}
diff --git a/internal/config/config.go b/internal/config/config.go
index f9d2161..c3fe12e 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -142,7 +142,7 @@ func (m *Manager) LoadCredentials() error {
return nil
}
-// SaveCredentials saves API credentials to file
+// SaveCredentials saves API credentials to file using atomic write to prevent TOCTOU race
func (m *Manager) SaveCredentials(apiID, apiKey string) error {
if err := m.setupDirectories(); err != nil {
return err
@@ -153,17 +153,57 @@ func (m *Manager) SaveCredentials(apiID, apiKey string) error {
APIKey: apiKey,
}
- credViper := viper.New()
- credViper.Set("api_id", m.credentials.APIID)
- credViper.Set("api_key", m.credentials.APIKey)
+ // Generate YAML content manually to avoid viper's default file creation
+ content := fmt.Sprintf("api_id: %s\napi_key: %s\n", apiID, apiKey)
+
+ // Use atomic write pattern to prevent TOCTOU race condition:
+ // 1. Write to temp file with secure permissions from the start
+ // 2. Atomically rename to target file
+ dir := filepath.Dir(m.config.CredentialsFile)
- if err := credViper.WriteConfigAs(m.config.CredentialsFile); err != nil {
- return fmt.Errorf("error writing credentials file: %w", err)
+ // Create temp file in same directory (required for atomic rename)
+ // Use O_CREATE|O_EXCL to prevent race on temp file creation
+ // File is created with 0600 permissions from the start
+ tmpFile, err := os.CreateTemp(dir, ".credentials-*.tmp")
+ if err != nil {
+ return fmt.Errorf("error creating temp credentials file: %w", err)
}
+ tmpPath := tmpFile.Name()
- // Set restrictive permissions
- if err := os.Chmod(m.config.CredentialsFile, 0600); err != nil {
- return fmt.Errorf("error setting credentials file permissions: %w", err)
+ // Clean up temp file on any error
+ defer func() {
+ if tmpFile != nil {
+ tmpFile.Close()
+ }
+ // Remove temp file if it still exists (rename failed or error occurred)
+ os.Remove(tmpPath)
+ }()
+
+ // Set secure permissions on temp file before writing content
+ if err := tmpFile.Chmod(0600); err != nil {
+ return fmt.Errorf("error setting temp file permissions: %w", err)
+ }
+
+ // Write credentials to temp file
+ if _, err := tmpFile.WriteString(content); err != nil {
+ return fmt.Errorf("error writing credentials to temp file: %w", err)
+ }
+
+ // Ensure data is flushed to disk before rename
+ if err := tmpFile.Sync(); err != nil {
+ return fmt.Errorf("error syncing temp file: %w", err)
+ }
+
+ // Close the file before rename (required on some systems)
+ if err := tmpFile.Close(); err != nil {
+ return fmt.Errorf("error closing temp file: %w", err)
+ }
+ tmpFile = nil // Prevent double-close in defer
+
+ // Atomic rename - this is the only operation that exposes the file
+ // Since we set permissions before writing, no race window exists
+ if err := os.Rename(tmpPath, m.config.CredentialsFile); err != nil {
+ return fmt.Errorf("error renaming credentials file: %w", err)
}
return nil
@@ -197,6 +237,7 @@ func (m *Manager) SaveConfig() error {
}
}
configViper.Set("integrations", m.config.Integrations)
+ configViper.Set("compliance_on_demand_only", m.config.ComplianceOnDemandOnly)
if err := configViper.WriteConfigAs(m.configFile); err != nil {
return fmt.Errorf("error writing config file: %w", err)
@@ -242,7 +283,20 @@ func (m *Manager) SetIntegrationEnabled(name string, enabled bool) error {
return m.SaveConfig()
}
// IsComplianceOnDemandOnly returns true if compliance should only run on-demand
// (not during scheduled reports). Reads the in-memory config value; see
// SetComplianceOnDemandOnly for how it is changed and persisted.
func (m *Manager) IsComplianceOnDemandOnly() bool {
	return m.config.ComplianceOnDemandOnly
}
+
// SetComplianceOnDemandOnly sets whether compliance should only run on-demand.
// The value is persisted immediately via SaveConfig; any error from writing
// the config file is returned to the caller.
func (m *Manager) SetComplianceOnDemandOnly(onDemandOnly bool) error {
	m.config.ComplianceOnDemandOnly = onDemandOnly
	return m.SaveConfig()
}
+
// setupDirectories creates necessary directories
+// SECURITY: Use restrictive permissions (0750) for config directories
+// This prevents unauthorized users from reading agent configuration
func (m *Manager) setupDirectories() error {
dirs := []string{
filepath.Dir(m.configFile),
@@ -251,7 +305,8 @@ func (m *Manager) setupDirectories() error {
}
for _, dir := range dirs {
- if err := os.MkdirAll(dir, 0755); err != nil {
+ // Use 0750 - owner full access, group read/execute, no world access
+ if err := os.MkdirAll(dir, 0750); err != nil {
return fmt.Errorf("error creating directory %s: %w", dir, err)
}
}
diff --git a/internal/constants/constants.go b/internal/constants/constants.go
index a85d79e..aaec0a9 100644
--- a/internal/constants/constants.go
+++ b/internal/constants/constants.go
@@ -8,19 +8,8 @@ const (
SELinuxEnforcing = "enforcing" // Will be mapped to enabled for API compatibility
)
-// OS type constants
-const (
- OSTypeDebian = "debian"
- OSTypeUbuntu = "ubuntu"
- OSTypeRHEL = "rhel"
- OSTypeCentOS = "centos"
- OSTypeFedora = "fedora"
- OSTypeRocky = "rocky"
- OSTypeAlma = "almalinux"
- OSTypePop = "pop"
- OSTypeMint = "linuxmint"
- OSTypeElementary = "elementary"
-)
+// Note: OS type detection uses string literals directly in system package
+// These constants are reserved for future use if needed
// Architecture constants
const (
@@ -36,7 +25,6 @@ const (
NetTypeEthernet = "ethernet"
NetTypeWiFi = "wifi"
NetTypeBridge = "bridge"
- NetTypeLoopback = "loopback"
)
// IP address families
diff --git a/internal/integrations/compliance/compliance.go b/internal/integrations/compliance/compliance.go
new file mode 100644
index 0000000..588b16c
--- /dev/null
+++ b/internal/integrations/compliance/compliance.go
@@ -0,0 +1,198 @@
+package compliance
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "patchmon-agent/internal/utils"
+ "patchmon-agent/pkg/models"
+
+ "github.com/sirupsen/logrus"
+)
+
+const integrationName = "compliance"
+
// Integration implements the Integration interface for compliance scanning.
type Integration struct {
	logger      *logrus.Logger
	openscap    *OpenSCAPScanner    // OpenSCAP / SCAP Security Guide scanner
	dockerBench *DockerBenchScanner // Docker Bench for Security scanner
	// Gates Docker Bench scans; toggled via SetDockerIntegrationEnabled
	// after construction (defaults to false).
	dockerIntegrationEnabled bool
}
+
+// New creates a new Compliance integration
+func New(logger *logrus.Logger) *Integration {
+ return &Integration{
+ logger: logger,
+ openscap: NewOpenSCAPScanner(logger),
+ dockerBench: NewDockerBenchScanner(logger),
+ dockerIntegrationEnabled: false,
+ }
+}
+
// SetDockerIntegrationEnabled sets whether Docker integration is enabled.
// Docker Bench scans will only run if this is true AND Docker is available
// (see CollectWithOptions).
func (c *Integration) SetDockerIntegrationEnabled(enabled bool) {
	c.dockerIntegrationEnabled = enabled
}
+
// Name returns the integration name ("compliance").
func (c *Integration) Name() string {
	return integrationName
}
+
// Priority returns the collection priority (lower = higher priority).
// Compliance is deliberately lower priority than docker (10) because
// compliance scans can be slow.
func (c *Integration) Priority() int {
	return 20 // Lower priority than docker (10) since scans can be slow
}
+
// SupportsRealtime indicates if this integration supports real-time
// monitoring; compliance scans are batch-only, so always false.
func (c *Integration) SupportsRealtime() bool {
	return false // Compliance scans are not real-time
}
+
+// IsAvailable checks if compliance scanning is available on this system
+func (c *Integration) IsAvailable() bool {
+ // Available if either OpenSCAP or Docker Bench is available
+ oscapAvail := c.openscap.IsAvailable()
+ dockerBenchAvail := c.dockerBench.IsAvailable()
+
+ if oscapAvail {
+ c.logger.Debug("OpenSCAP is available for compliance scanning")
+ }
+ if dockerBenchAvail {
+ c.logger.Debug("Docker Bench is available for compliance scanning")
+ }
+
+ return oscapAvail || dockerBenchAvail
+}
+
// Collect gathers compliance scan data using default options.
// It simply delegates to CollectWithOptions with nil options (default
// profile, no remediation).
func (c *Integration) Collect(ctx context.Context) (*models.IntegrationData, error) {
	return c.CollectWithOptions(ctx, nil)
}
+
+// CollectWithOptions gathers compliance scan data with scan options (remediation, etc.)
+func (c *Integration) CollectWithOptions(ctx context.Context, options *models.ComplianceScanOptions) (*models.IntegrationData, error) {
+ startTime := time.Now()
+
+ c.logger.Info("Starting compliance scan collection...")
+
+ // Docker Bench is only available if Docker integration is enabled AND Docker is installed
+ dockerBenchEffectivelyAvailable := c.dockerIntegrationEnabled && c.dockerBench.IsAvailable()
+
+ complianceData := &models.ComplianceData{
+ Scans: make([]models.ComplianceScan, 0),
+ OSInfo: c.openscap.GetOSInfo(),
+ ScannerInfo: models.ComplianceScannerInfo{
+ OpenSCAPAvailable: c.openscap.IsAvailable(),
+ OpenSCAPVersion: c.openscap.GetVersion(),
+ DockerBenchAvailable: dockerBenchEffectivelyAvailable,
+ AvailableProfiles: c.openscap.GetAvailableProfiles(),
+ },
+ }
+
+ // Determine which scans to run based on profile ID
+ profileID := ""
+ if options != nil && options.ProfileID != "" {
+ profileID = options.ProfileID
+ }
+
+ // Check if this is a Docker Bench specific scan
+ isDockerBenchOnly := profileID == "docker-bench"
+
+ // Run OpenSCAP scan if available and not a Docker Bench only request
+ if c.openscap.IsAvailable() && !isDockerBenchOnly {
+ var scan *models.ComplianceScan
+ var err error
+
+ if options != nil && options.EnableRemediation {
+ c.logger.Info("Running OpenSCAP CIS benchmark scan with remediation enabled...")
+ scan, err = c.openscap.RunScanWithOptions(ctx, options)
+ } else {
+ c.logger.Info("Running OpenSCAP CIS benchmark scan...")
+ scanProfileID := "level1_server"
+ if profileID != "" {
+ scanProfileID = profileID
+ }
+ scan, err = c.openscap.RunScan(ctx, scanProfileID)
+ }
+
+ if err != nil {
+ c.logger.WithError(err).Warn("OpenSCAP scan failed")
+ // Add failed scan result
+ complianceData.Scans = append(complianceData.Scans, models.ComplianceScan{
+ ProfileName: "level1_server",
+ ProfileType: "openscap",
+ Status: "failed",
+ StartedAt: startTime,
+ Error: err.Error(),
+ })
+ } else {
+ complianceData.Scans = append(complianceData.Scans, *scan)
+ logFields := logrus.Fields{
+ "profile": scan.ProfileName,
+ "score": fmt.Sprintf("%.1f%%", scan.Score),
+ "passed": scan.Passed,
+ "failed": scan.Failed,
+ }
+ if scan.RemediationApplied {
+ logFields["remediation_count"] = scan.RemediationCount
+ }
+ c.logger.WithFields(logFields).Info("OpenSCAP scan completed")
+ }
+ }
+
+ // Run Docker Bench scan if Docker integration is enabled AND Docker is available
+ // Always run if docker-bench profile is specifically selected, or if running all profiles
+ runDockerBench := dockerBenchEffectivelyAvailable && (isDockerBenchOnly || profileID == "" || profileID == "all")
+ if runDockerBench {
+ c.logger.Info("Running Docker Bench for Security scan...")
+ scan, err := c.dockerBench.RunScan(ctx)
+ if err != nil {
+ c.logger.WithError(err).Warn("Docker Bench scan failed")
+ // Add failed scan result with truncated error message
+ errMsg := err.Error()
+ if len(errMsg) > 500 {
+ errMsg = errMsg[:500] + "... (truncated)"
+ }
+ now := time.Now()
+ complianceData.Scans = append(complianceData.Scans, models.ComplianceScan{
+ ProfileName: "Docker Bench for Security",
+ ProfileType: "docker-bench",
+ Status: "failed",
+ StartedAt: startTime,
+ CompletedAt: &now,
+ Error: errMsg,
+ })
+ } else {
+ complianceData.Scans = append(complianceData.Scans, *scan)
+ c.logger.WithFields(logrus.Fields{
+ "profile": scan.ProfileName,
+ "score": fmt.Sprintf("%.1f%%", scan.Score),
+ "passed": scan.Passed,
+ "failed": scan.Failed,
+ "warnings": scan.Warnings,
+ }).Info("Docker Bench scan completed")
+ }
+ }
+
+ executionTime := time.Since(startTime).Seconds()
+
+ return &models.IntegrationData{
+ Name: c.Name(),
+ Enabled: true,
+ Data: complianceData,
+ CollectedAt: utils.GetCurrentTimeUTC(),
+ ExecutionTime: executionTime,
+ }, nil
+}
+
+// UpgradeSSGContent upgrades the SCAP Security Guide content packages
+func (c *Integration) UpgradeSSGContent() error {
+ if c.openscap == nil {
+ return fmt.Errorf("OpenSCAP scanner not initialized")
+ }
+ return c.openscap.UpgradeSSGContent()
+}
diff --git a/internal/integrations/compliance/docker_bench.go b/internal/integrations/compliance/docker_bench.go
new file mode 100644
index 0000000..323f04c
--- /dev/null
+++ b/internal/integrations/compliance/docker_bench.go
@@ -0,0 +1,489 @@
+package compliance
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+ "time"
+
+ "patchmon-agent/pkg/models"
+
+ "github.com/sirupsen/logrus"
+)
+
const (
	// dockerBinary is the Docker CLI used both for the availability probe
	// and to run the benchmark container.
	dockerBinary = "docker"
	// Docker Bench for Security image
	// Using jauderho's maintained image - the official docker/docker-bench-security is deprecated
	// and uses an ancient Docker client (API 1.38) incompatible with modern Docker daemons (API 1.44+)
	dockerBenchImage = "jauderho/docker-bench-security:latest"
)
+
// DockerBenchScanner handles Docker Bench for Security scanning.
type DockerBenchScanner struct {
	logger    *logrus.Logger
	available bool // cached result of checkAvailability (binary found + daemon responding)
}
+
// NewDockerBenchScanner creates a new Docker Bench scanner.
// Availability (docker binary present and daemon responding) is probed once
// at construction; IsAvailable reads the cached result.
func NewDockerBenchScanner(logger *logrus.Logger) *DockerBenchScanner {
	s := &DockerBenchScanner{
		logger: logger,
	}
	s.checkAvailability()
	return s
}
+
// IsAvailable returns whether Docker Bench is available, as cached by the
// availability probe at construction (or a later EnsureInstalled re-check).
func (s *DockerBenchScanner) IsAvailable() bool {
	return s.available
}
+
+// checkAvailability checks if Docker is available for running Docker Bench
+func (s *DockerBenchScanner) checkAvailability() {
+ // Check if docker binary exists
+ _, err := exec.LookPath(dockerBinary)
+ if err != nil {
+ s.logger.Debug("Docker binary not found")
+ s.available = false
+ return
+ }
+
+ // Check if Docker daemon is running
+ cmd := exec.Command(dockerBinary, "info")
+ if err := cmd.Run(); err != nil {
+ s.logger.Debug("Docker daemon not responding")
+ s.available = false
+ return
+ }
+
+ s.available = true
+ s.logger.Debug("Docker is available for Docker Bench scanning")
+}
+
// RunScan executes a Docker Bench for Security scan.
//
// Flow: pull (or reuse) the benchmark image, locate the Docker socket,
// run the container with the host access it needs to audit the daemon,
// then parse the text output into a ComplianceScan. A non-zero container
// exit does NOT abort the scan — Docker Bench exits non-zero when checks
// fail — so output is parsed regardless, and only a cancelled context or
// a missing image/socket yields an error.
func (s *DockerBenchScanner) RunScan(ctx context.Context) (*models.ComplianceScan, error) {
	if !s.available {
		return nil, fmt.Errorf("Docker is not available")
	}

	startTime := time.Now()

	s.logger.WithField("image", dockerBenchImage).Info("Pulling Docker Bench for Security image...")

	// Pull the latest Docker Bench image
	pullCmd := exec.CommandContext(ctx, dockerBinary, "pull", dockerBenchImage)
	if output, err := pullCmd.CombinedOutput(); err != nil {
		s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to pull Docker Bench image, attempting to use existing image")

		// Check if image exists locally; only fail when there is no local
		// copy to fall back to (e.g. offline hosts keep working)
		checkCmd := exec.CommandContext(ctx, dockerBinary, "images", "-q", dockerBenchImage)
		checkOutput, checkErr := checkCmd.Output()
		if checkErr != nil || strings.TrimSpace(string(checkOutput)) == "" {
			return nil, fmt.Errorf("Docker Bench image not available and pull failed: %w", err)
		}
		s.logger.Info("Using existing Docker Bench image")
	} else {
		s.logger.Info("Docker Bench image pulled successfully")
	}

	// Run Docker Bench
	// NOTE: These elevated privileges are necessary for Docker Bench to inspect host configuration.
	args := []string{
		"run", "--rm",
		"--net", "host",
		"--pid", "host",
		"--userns", "host",
		"--cap-add", "audit_control",
	}

	// Find the Docker socket - check common locations
	dockerSocket := ""
	socketPaths := []string{
		"/var/run/docker.sock",
		"/run/docker.sock",
		"/docker.sock", // Sometimes mounted here in containers
	}

	// Check DOCKER_HOST environment variable first (only unix:// sockets
	// are usable here, since the socket path must be bind-mounted)
	if dockerHost := os.Getenv("DOCKER_HOST"); dockerHost != "" {
		if strings.HasPrefix(dockerHost, "unix://") {
			socketPath := strings.TrimPrefix(dockerHost, "unix://")
			if _, err := os.Stat(socketPath); err == nil {
				dockerSocket = socketPath
				s.logger.WithField("socket", dockerSocket).Debug("Using Docker socket from DOCKER_HOST")
			}
		}
	}

	// If not found via env, check common paths
	if dockerSocket == "" {
		for _, path := range socketPaths {
			if _, err := os.Stat(path); err == nil {
				dockerSocket = path
				s.logger.WithField("socket", dockerSocket).Info("Found Docker socket")
				break
			}
		}
	}

	if dockerSocket == "" {
		return nil, fmt.Errorf("Docker socket not found at any known location")
	}

	// Verify socket is accessible
	socketInfo, err := os.Stat(dockerSocket)
	if err != nil {
		return nil, fmt.Errorf("Docker socket not accessible: %w", err)
	}
	s.logger.WithFields(logrus.Fields{
		"socket": dockerSocket,
		"mode":   socketInfo.Mode().String(),
	}).Info("Docker socket verified")

	// Required mounts - socket needs read-write for Docker Bench to query daemon
	requiredMounts := []string{
		"/etc:/etc:ro",
		"/var/lib:/var/lib:ro",
		dockerSocket + ":/var/run/docker.sock", // Map found socket to expected location in container
	}

	// Optional mounts - only add if path exists
	optionalMounts := map[string]string{
		"/lib/systemd/system": "/lib/systemd/system:/lib/systemd/system:ro",
		"/usr/bin/containerd": "/usr/bin/containerd:/usr/bin/containerd:ro",
		"/usr/bin/runc":       "/usr/bin/runc:/usr/bin/runc:ro",
		"/usr/lib/systemd":    "/usr/lib/systemd:/usr/lib/systemd:ro",
	}

	// Add required mounts
	for _, mount := range requiredMounts {
		args = append(args, "-v", mount)
	}

	// Add optional mounts only if source path exists
	for path, mount := range optionalMounts {
		if _, err := os.Stat(path); err == nil {
			args = append(args, "-v", mount)
		} else {
			s.logger.WithField("path", path).Debug("Optional mount path not found, skipping")
		}
	}

	// -b: disable colors, -p: print remediation measures
	args = append(args, "--label", "docker_bench_security", dockerBenchImage, "-b", "-p")

	s.logger.WithField("command", "docker "+strings.Join(args, " ")).Info("Running Docker Bench for Security...")

	cmd := exec.CommandContext(ctx, dockerBinary, args...)
	output, err := cmd.CombinedOutput()

	outputStr := string(output)
	outputLen := len(outputStr)

	if err != nil {
		if ctx.Err() != nil {
			return nil, fmt.Errorf("scan cancelled: %w", ctx.Err())
		}
		// Docker Bench may exit non-zero on failures, parse output anyway
		s.logger.WithError(err).WithField("output_length", outputLen).Debug("Docker Bench exited with error, parsing output")
	}

	// Log output for debugging if it's short (likely an error)
	if outputLen == 0 {
		s.logger.Warn("Docker Bench produced no output - container may have failed to start")
	} else if outputLen < 500 {
		s.logger.WithField("output", outputStr).Debug("Docker Bench produced short output")
	} else {
		s.logger.WithField("output_length", outputLen).Debug("Docker Bench output captured")
	}

	// Parse the output
	scan := s.parseOutput(outputStr)
	scan.StartedAt = startTime
	now := time.Now()
	scan.CompletedAt = &now
	scan.Status = "completed"

	// Log warning if no results were parsed
	if scan.TotalRules == 0 && outputLen > 0 {
		// Log first 500 chars to help debug parsing issues
		preview := outputStr
		if len(preview) > 500 {
			preview = preview[:500] + "..."
		}
		s.logger.WithField("output_preview", preview).Warn("Docker Bench output received but no rules parsed - check output format")
	}

	return scan, nil
}
+
// parseOutput parses Docker Bench text output into a ComplianceScan.
//
// The parser is a line-oriented state machine: each "[TAG] x.y - title"
// line opens a new result, and subsequent indented "* ..." bullet lines
// attach findings/remediation text to the most recent result
// (lastResultIdx). Score = passed / (passed + failed + warnings) * 100.
//
// NOTE(review): the "fail" counter branch below is currently unreachable —
// mapStatus never returns "fail" (Docker Bench emits no [FAIL] tag here).
func (s *DockerBenchScanner) parseOutput(output string) *models.ComplianceScan {
	scan := &models.ComplianceScan{
		ProfileName: "Docker Bench for Security",
		ProfileType: "docker-bench",
		Results:     make([]models.ComplianceResult, 0),
	}

	// Debug: track status counts as we parse
	debugStatusCounts := map[string]int{}

	// Parse patterns
	// [PASS] 1.1.1 - Ensure a separate partition for containers has been created
	// [WARN] 1.1.2 - Ensure only trusted users are allowed to control Docker daemon
	// [INFO] 1.1.3 - Ensure auditing is configured for the Docker daemon
	// [NOTE] 4.5 - Ensure Content trust for Docker is Enabled

	patterns := map[string]*regexp.Regexp{
		"pass": regexp.MustCompile(`\[PASS\]\s+(\d+\.\d+(?:\.\d+)?)\s+-\s+(.+)`),
		"warn": regexp.MustCompile(`\[WARN\]\s+(\d+\.\d+(?:\.\d+)?)\s+-\s+(.+)`),
		"info": regexp.MustCompile(`\[INFO\]\s+(\d+\.\d+(?:\.\d+)?)\s+-\s+(.+)`),
		"note": regexp.MustCompile(`\[NOTE\]\s+(\d+\.\d+(?:\.\d+)?)\s+-\s+(.+)`),
	}

	// Pattern for remediation lines (printed with -p flag)
	remediationPattern := regexp.MustCompile(`^\s+\*\s+Remediation:\s*(.+)`)
	// Pattern for detail/finding lines
	detailPattern := regexp.MustCompile(`^\s+\*\s+(.+)`)
	// Pattern for continuation lines (indented text without bullet)
	continuationPattern := regexp.MustCompile(`^\s{6,}(.+)`)

	scanner := bufio.NewScanner(strings.NewReader(output))
	currentSection := ""
	var lastResultIdx int = -1 // index of the result that bullet lines attach to
	inRemediation := false     // Track if we're reading multi-line remediation

	for scanner.Scan() {
		line := scanner.Text()

		// Check for remediation line (follows a check result)
		if lastResultIdx >= 0 {
			if matches := remediationPattern.FindStringSubmatch(line); matches != nil {
				scan.Results[lastResultIdx].Remediation = strings.TrimSpace(matches[1])
				inRemediation = true
				continue
			}
			// Check for continuation of remediation text (deeply indented lines)
			if inRemediation {
				if matches := continuationPattern.FindStringSubmatch(line); matches != nil {
					// Append to existing remediation
					scan.Results[lastResultIdx].Remediation += " " + strings.TrimSpace(matches[1])
					continue
				} else if strings.TrimSpace(line) == "" {
					// Empty line ends remediation section
					inRemediation = false
				} else if !strings.HasPrefix(strings.TrimSpace(line), "*") && !strings.HasPrefix(line, "[") {
					// Non-bullet continuation line
					scan.Results[lastResultIdx].Remediation += " " + strings.TrimSpace(line)
					continue
				} else {
					inRemediation = false
				}
			}
			// Check for detail/finding lines (e.g., "* Running as root: container_name")
			if matches := detailPattern.FindStringSubmatch(line); matches != nil {
				detail := strings.TrimSpace(matches[1])
				// Skip if it's a remediation line we already handled
				if !strings.HasPrefix(detail, "Remediation:") {
					if scan.Results[lastResultIdx].Finding == "" {
						scan.Results[lastResultIdx].Finding = detail
					} else {
						scan.Results[lastResultIdx].Finding += "; " + detail
					}
				}
				continue
			}
		}

		// Detect section headers (e.g., "[INFO] 1 - Host Configuration")
		// NOTE(review): the example above contains " - ", which this condition
		// excludes — confirm which header shape the image actually prints.
		if strings.Contains(line, "[INFO]") && !strings.Contains(line, " - ") {
			// Section header, extract section name
			parts := strings.SplitN(line, " ", 3)
			if len(parts) >= 2 {
				currentSection = strings.TrimSpace(parts[1])
			}
			lastResultIdx = -1
			inRemediation = false
			continue
		}

		// Check each pattern (each regex requires its own literal tag, so at
		// most one pattern matches a given line; map order doesn't matter)
		for status, pattern := range patterns {
			if matches := pattern.FindStringSubmatch(line); matches != nil {
				ruleID := matches[1]
				title := strings.TrimSpace(matches[2])

				// Map status
				resultStatus := s.mapStatus(status)

				// Debug: track what we're actually parsing
				debugStatusCounts[resultStatus]++

				// Update counters
				switch resultStatus {
				case "pass":
					scan.Passed++
				case "fail":
					scan.Failed++
				case "warn":
					scan.Warnings++
					// Debug: log when we find a warning
					s.logger.WithFields(logrus.Fields{
						"rule_id": ruleID,
						"title":   title,
						"status":  resultStatus,
					}).Debug("Parsed Docker Bench warning")
				case "skip":
					scan.Skipped++
				}
				scan.TotalRules++

				// Determine section from rule ID
				section := s.getSectionFromID(ruleID, currentSection)

				scan.Results = append(scan.Results, models.ComplianceResult{
					RuleID:  ruleID,
					Title:   title,
					Status:  resultStatus,
					Section: section,
				})
				lastResultIdx = len(scan.Results) - 1
				inRemediation = false // Reset for new result
				break
			}
		}
	}

	// Calculate score
	if scan.TotalRules > 0 {
		applicable := scan.Passed + scan.Failed + scan.Warnings
		if applicable > 0 {
			scan.Score = float64(scan.Passed) / float64(applicable) * 100
		}
	}

	// Debug: log parsed results summary
	resultStatusCounts := map[string]int{}
	for _, r := range scan.Results {
		resultStatusCounts[r.Status]++
	}
	s.logger.WithFields(logrus.Fields{
		"parse_counts":  debugStatusCounts,
		"result_counts": resultStatusCounts,
		"total_results": len(scan.Results),
		"scan_passed":   scan.Passed,
		"scan_failed":   scan.Failed,
		"scan_warnings": scan.Warnings,
		"scan_skipped":  scan.Skipped,
		"scan_total":    scan.TotalRules,
	}).Info("Docker Bench parsing complete - debug status comparison")

	return scan
}
+
+// mapStatus maps Docker Bench status to our status
+func (s *DockerBenchScanner) mapStatus(status string) string {
+ switch status {
+ case "pass":
+ return "pass"
+ case "warn":
+ return "warn"
+ case "info":
+ return "skip"
+ case "note":
+ return "skip"
+ default:
+ return "skip"
+ }
+}
+
+// getSectionFromID extracts section name from rule ID
+func (s *DockerBenchScanner) getSectionFromID(ruleID string, currentSection string) string {
+ // Docker Bench sections:
+ // 1 - Host Configuration
+ // 2 - Docker daemon configuration
+ // 3 - Docker daemon configuration files
+ // 4 - Container Images and Build File
+ // 5 - Container Runtime
+ // 6 - Docker Security Operations
+ // 7 - Docker Swarm Configuration
+
+ sections := map[string]string{
+ "1": "Host Configuration",
+ "2": "Docker Daemon Configuration",
+ "3": "Docker Daemon Configuration Files",
+ "4": "Container Images and Build File",
+ "5": "Container Runtime",
+ "6": "Docker Security Operations",
+ "7": "Docker Swarm Configuration",
+ }
+
+ // Get first digit of rule ID
+ if len(ruleID) > 0 {
+ firstDigit := string(ruleID[0])
+ if section, exists := sections[firstDigit]; exists {
+ return section
+ }
+ }
+
+ return currentSection
+}
+
+// EnsureInstalled pre-pulls the Docker Bench image if Docker is available
+func (s *DockerBenchScanner) EnsureInstalled() error {
+ // Re-check availability
+ s.checkAvailability()
+
+ if !s.available {
+ return fmt.Errorf("Docker is not available - Docker Bench requires Docker to run")
+ }
+
+ s.logger.Info("Pre-pulling Docker Bench for Security image...")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+
+ pullCmd := exec.CommandContext(ctx, dockerBinary, "pull", dockerBenchImage)
+ output, err := pullCmd.CombinedOutput()
+ if err != nil {
+ s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to pull Docker Bench image")
+ return fmt.Errorf("failed to pull Docker Bench image: %w", err)
+ }
+
+ s.logger.Info("Docker Bench image pulled successfully")
+ return nil
+}
+
+// Cleanup removes the Docker Bench image to free up space
+func (s *DockerBenchScanner) Cleanup() error {
+ if !s.available {
+ s.logger.Debug("Docker not available, nothing to clean up")
+ return nil
+ }
+
+ s.logger.Info("Removing Docker Bench for Security image...")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+
+ // Remove the image
+ removeCmd := exec.CommandContext(ctx, dockerBinary, "rmi", dockerBenchImage)
+ output, err := removeCmd.CombinedOutput()
+ if err != nil {
+ // Image might not exist, which is fine
+ if strings.Contains(string(output), "No such image") {
+ s.logger.Debug("Docker Bench image already removed")
+ return nil
+ }
+ s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to remove Docker Bench image")
+ return fmt.Errorf("failed to remove Docker Bench image: %w", err)
+ }
+
+ s.logger.Info("Docker Bench image removed successfully")
+ return nil
+}
diff --git a/internal/integrations/compliance/openscap.go b/internal/integrations/compliance/openscap.go
new file mode 100644
index 0000000..9bfa334
--- /dev/null
+++ b/internal/integrations/compliance/openscap.go
@@ -0,0 +1,1724 @@
+package compliance
+
+import (
+ "archive/zip"
+ "bufio"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "patchmon-agent/pkg/models"
+
+ "github.com/sirupsen/logrus"
+)
+
const (
	// oscapBinary is the OpenSCAP CLI entry point.
	oscapBinary = "oscap"
	// scapContentDir is the standard install location of SCAP Security
	// Guide (SSG) datastream content.
	scapContentDir = "/usr/share/xml/scap/ssg/content"
	// osReleasePath feeds OS name/version/family detection.
	osReleasePath = "/etc/os-release"
)
+
// Profile mappings for different OS families.
// Outer key: short profile ID used by the agent; inner key: OS name;
// value: the full XCCDF profile identifier passed to oscap.
var profileMappings = map[string]map[string]string{
	"level1_server": {
		"ubuntu":   "xccdf_org.ssgproject.content_profile_cis_level1_server",
		"debian":   "xccdf_org.ssgproject.content_profile_cis_level1_server",
		"rhel":     "xccdf_org.ssgproject.content_profile_cis",
		"centos":   "xccdf_org.ssgproject.content_profile_cis",
		"rocky":    "xccdf_org.ssgproject.content_profile_cis",
		"alma":     "xccdf_org.ssgproject.content_profile_cis",
		"fedora":   "xccdf_org.ssgproject.content_profile_cis",
		"sles":     "xccdf_org.ssgproject.content_profile_cis",
		"opensuse": "xccdf_org.ssgproject.content_profile_cis",
	},
	"level2_server": {
		"ubuntu": "xccdf_org.ssgproject.content_profile_cis_level2_server",
		"debian": "xccdf_org.ssgproject.content_profile_cis_level2_server",
		// NOTE(review): the RHEL-family entries below point at
		// "cis_server_l1", which reads like a Level 1 profile despite the
		// "level2_server" key — confirm this mapping is intentional.
		"rhel":   "xccdf_org.ssgproject.content_profile_cis_server_l1",
		"centos": "xccdf_org.ssgproject.content_profile_cis_server_l1",
		"rocky":  "xccdf_org.ssgproject.content_profile_cis_server_l1",
		"alma":   "xccdf_org.ssgproject.content_profile_cis_server_l1",
	},
}
+
// OpenSCAPScanner handles OpenSCAP compliance scanning.
type OpenSCAPScanner struct {
	logger    *logrus.Logger
	osInfo    models.ComplianceOSInfo // filled by detectOS at construction
	available bool                    // set by checkAvailability at construction
	version   string                  // oscap version string; exposed via GetVersion
}
+
// NewOpenSCAPScanner creates a new OpenSCAP scanner.
// OS detection and the oscap availability probe run once at construction;
// their results are cached on the struct.
func NewOpenSCAPScanner(logger *logrus.Logger) *OpenSCAPScanner {
	s := &OpenSCAPScanner{
		logger: logger,
	}
	s.osInfo = s.detectOS()
	s.checkAvailability()
	return s
}
+
// IsAvailable returns whether OpenSCAP is available (cached at construction).
func (s *OpenSCAPScanner) IsAvailable() bool {
	return s.available
}
+
// GetVersion returns the cached OpenSCAP version string (may be empty when
// oscap was not found).
func (s *OpenSCAPScanner) GetVersion() string {
	return s.version
}
+
// GetOSInfo returns the OS information detected at construction.
func (s *OpenSCAPScanner) GetOSInfo() models.ComplianceOSInfo {
	return s.osInfo
}
+
// GetContentFilePath returns the path to the SCAP content file being used.
func (s *OpenSCAPScanner) GetContentFilePath() string {
	return s.getContentFile()
}
+
+// GetContentPackageVersion returns the SSG content version
+// First checks for GitHub-installed version, then falls back to package manager
+func (s *OpenSCAPScanner) GetContentPackageVersion() string {
+ // First check for GitHub-installed version marker
+ githubVersion := s.getInstalledSSGVersion()
+ if githubVersion != "" {
+ return githubVersion
+ }
+
+ // Fall back to package manager version
+ var cmd *exec.Cmd
+
+ switch s.osInfo.Family {
+ case "debian":
+ cmd = exec.Command("dpkg-query", "-W", "-f=${Version}", "ssg-base")
+ case "rhel":
+ cmd = exec.Command("rpm", "-q", "--qf", "%{VERSION}-%{RELEASE}", "scap-security-guide")
+ case "suse":
+ cmd = exec.Command("rpm", "-q", "--qf", "%{VERSION}-%{RELEASE}", "scap-security-guide")
+ default:
+ return ""
+ }
+
+ output, err := cmd.Output()
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(string(output))
+}
+
+// DiscoverProfiles returns all available profiles from the SCAP content file
+func (s *OpenSCAPScanner) DiscoverProfiles() []models.ScanProfileInfo {
+ contentFile := s.getContentFile()
+ if contentFile == "" {
+ s.logger.Debug("No content file available, returning default profiles")
+ return s.getDefaultProfiles()
+ }
+
+ // Run oscap info to get profile list
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, oscapBinary, "info", "--profiles", contentFile)
+ output, err := cmd.Output()
+ if err != nil {
+ s.logger.WithError(err).Debug("Failed to get profiles from oscap info, using defaults")
+ return s.getDefaultProfiles()
+ }
+
+ profiles := []models.ScanProfileInfo{}
+ scanner := bufio.NewScanner(strings.NewReader(string(output)))
+
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if line == "" {
+ continue
+ }
+
+ // Parse profile line: "xccdf_org.ssgproject.content_profile_cis_level1_server:CIS Ubuntu 22.04 Level 1 Server Benchmark"
+ parts := strings.SplitN(line, ":", 2)
+ if len(parts) < 1 {
+ continue
+ }
+
+ xccdfId := strings.TrimSpace(parts[0])
+ name := xccdfId
+ if len(parts) == 2 {
+ name = strings.TrimSpace(parts[1])
+ }
+
+ // Determine category from profile ID
+ category := s.categorizeProfile(xccdfId)
+
+ // Create short ID from XCCDF ID
+ shortId := s.createShortId(xccdfId)
+
+ profiles = append(profiles, models.ScanProfileInfo{
+ ID: shortId,
+ Name: name,
+ Type: "openscap",
+ XCCDFId: xccdfId,
+ Category: category,
+ })
+ }
+
+ if len(profiles) == 0 {
+ return s.getDefaultProfiles()
+ }
+
+ s.logger.WithField("count", len(profiles)).Debug("Discovered profiles from SCAP content")
+ return profiles
+}
+
+// categorizeProfile determines the category of a profile based on its ID
+func (s *OpenSCAPScanner) categorizeProfile(xccdfId string) string {
+ id := strings.ToLower(xccdfId)
+ switch {
+ case strings.Contains(id, "cis"):
+ return "cis"
+ case strings.Contains(id, "stig"):
+ return "stig"
+ case strings.Contains(id, "pci") || strings.Contains(id, "pci-dss"):
+ return "pci-dss"
+ case strings.Contains(id, "hipaa"):
+ return "hipaa"
+ case strings.Contains(id, "anssi"):
+ return "anssi"
+ case strings.Contains(id, "standard"):
+ return "standard"
+ default:
+ return "other"
+ }
+}
+
+// createShortId creates a short profile ID from the full XCCDF ID
+func (s *OpenSCAPScanner) createShortId(xccdfId string) string {
+ // Extract the profile name part: xccdf_org.ssgproject.content_profile_XXX -> XXX
+ if strings.Contains(xccdfId, "_profile_") {
+ parts := strings.SplitN(xccdfId, "_profile_", 2)
+ if len(parts) == 2 {
+ return parts[1]
+ }
+ }
+ return xccdfId
+}
+
+// getDefaultProfiles returns fallback profiles when discovery fails
+func (s *OpenSCAPScanner) getDefaultProfiles() []models.ScanProfileInfo {
+ return []models.ScanProfileInfo{
+ {
+ ID: "level1_server",
+ Name: "CIS Level 1 Server",
+ Description: "Basic security hardening for servers",
+ Type: "openscap",
+ Category: "cis",
+ },
+ {
+ ID: "level2_server",
+ Name: "CIS Level 2 Server",
+ Description: "Extended security hardening (more restrictive)",
+ Type: "openscap",
+ Category: "cis",
+ },
+ }
+}
+
// GetScannerDetails returns comprehensive scanner information: oscap
// availability/version, the SSG content file and version in use, whether
// SSG needs upgrading for this OS, whether the content file appears to
// mismatch the OS version, and the dynamically discovered profile list.
func (s *OpenSCAPScanner) GetScannerDetails() *models.ComplianceScannerDetails {
	contentFile := s.getContentFile()
	contentVersion := s.GetContentPackageVersion()

	// Determine minimum required SSG version for this OS.
	// NOTE(review): this is a lexicographic string comparison of OS
	// versions ("24.04" >= ...); it works for these two-digit Ubuntu
	// releases but is not a general version compare — confirm intent.
	minVersion := ""
	if s.osInfo.Name == "ubuntu" && s.osInfo.Version >= "24.04" {
		minVersion = "0.1.76"
	} else if s.osInfo.Name == "ubuntu" && s.osInfo.Version >= "22.04" {
		minVersion = "0.1.60"
	}

	// Check if SSG needs upgrade (installed-but-old vs not installed at all)
	ssgNeedsUpgrade := false
	ssgUpgradeMessage := ""
	if minVersion != "" && contentVersion != "" {
		if compareVersions(contentVersion, minVersion) < 0 {
			ssgNeedsUpgrade = true
			ssgUpgradeMessage = fmt.Sprintf("ssg-base %s is installed, but %s %s requires v%s+ for proper CIS/STIG content.",
				contentVersion, s.osInfo.Name, s.osInfo.Version, minVersion)
		}
	} else if minVersion != "" && contentVersion == "" {
		ssgNeedsUpgrade = true
		ssgUpgradeMessage = fmt.Sprintf("ssg-base is not installed. %s %s requires ssg-base v%s+ for CIS/STIG scanning.",
			s.osInfo.Name, s.osInfo.Version, minVersion)
	}

	// Check for content mismatch: the content filename is expected to embed
	// the OS version with dots stripped (e.g. "2204").
	contentMismatch := false
	mismatchWarning := ""
	if contentFile != "" && s.osInfo.Version != "" {
		osVersion := strings.ReplaceAll(s.osInfo.Version, ".", "")
		baseName := filepath.Base(contentFile)
		if !strings.Contains(baseName, osVersion) {
			contentMismatch = true
			if ssgNeedsUpgrade {
				mismatchWarning = ssgUpgradeMessage
			} else {
				mismatchWarning = fmt.Sprintf("Content file %s may not match OS version %s.", baseName, s.osInfo.Version)
			}
		}
	} else if contentFile == "" && s.osInfo.Name == "ubuntu" && s.osInfo.Version >= "24.04" {
		contentMismatch = true
		mismatchWarning = ssgUpgradeMessage
		if mismatchWarning == "" {
			mismatchWarning = "No SCAP content found for Ubuntu 24.04."
		}
	}

	// Discover available profiles dynamically
	profiles := s.DiscoverProfiles()

	// Determine content package source (GitHub-installed SSG wins)
	contentPackage := fmt.Sprintf("ssg-base %s", contentVersion)
	githubVersion := s.getInstalledSSGVersion()
	if githubVersion != "" {
		contentPackage = fmt.Sprintf("SSG %s (GitHub)", githubVersion)
	}

	return &models.ComplianceScannerDetails{
		OpenSCAPVersion:   s.version,
		OpenSCAPAvailable: s.available,
		ContentFile:       filepath.Base(contentFile),
		ContentPackage:    contentPackage,
		SSGVersion:        contentVersion,
		SSGMinVersion:     minVersion,
		SSGNeedsUpgrade:   ssgNeedsUpgrade,
		SSGUpgradeMessage: ssgUpgradeMessage,
		AvailableProfiles: profiles,
		OSName:            s.osInfo.Name,
		OSVersion:         s.osInfo.Version,
		OSFamily:          s.osInfo.Family,
		ContentMismatch:   contentMismatch,
		MismatchWarning:   mismatchWarning,
	}
}
+
// compareVersions compares two dotted version strings (e.g. "0.1.76").
// Each dot-separated component is compared by its leading integer value;
// non-numeric suffixes (such as "-1ubuntu1") are ignored and missing
// components are treated as 0, so "1.2" == "1.2.0".
// Returns -1 if v1 < v2, 0 if equal, 1 if v1 > v2.
//
// The previous implementation used fmt.Sscanf("%d") and silently ignored
// its error; this explicit leading-digit parse is deterministic and never
// honors a stray sign character.
func compareVersions(v1, v2 string) int {
	parts1 := strings.Split(v1, ".")
	parts2 := strings.Split(v2, ".")

	// leadingInt extracts the integer prefix of a version component
	// ("76-1ubuntu1" -> 76, "rc1" -> 0).
	leadingInt := func(s string) int {
		n := 0
		for _, r := range s {
			if r < '0' || r > '9' {
				break
			}
			n = n*10 + int(r-'0')
		}
		return n
	}

	maxLen := len(parts1)
	if len(parts2) > maxLen {
		maxLen = len(parts2)
	}

	for i := 0; i < maxLen; i++ {
		var n1, n2 int
		if i < len(parts1) {
			n1 = leadingInt(parts1[i])
		}
		if i < len(parts2) {
			n2 = leadingInt(parts2[i])
		}
		if n1 < n2 {
			return -1
		}
		if n1 > n2 {
			return 1
		}
	}
	return 0
}
+
+// EnsureInstalled installs OpenSCAP and SCAP content if not present
+// Also upgrades existing packages to ensure latest content is available
+//
+// Supported families: "debian" (apt-get), "rhel" (dnf, falling back to yum),
+// and "suse" (zypper); any other family returns an error. All package
+// operations share a single 5-minute timeout. Returns nil only when, after
+// installation, checkAvailability() reports the scanner usable (binary found
+// AND content file present).
+func (s *OpenSCAPScanner) EnsureInstalled() error {
+	s.logger.Info("Ensuring OpenSCAP is installed with latest SCAP content...")
+
+	// Create context with timeout for package operations
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	// Environment for non-interactive apt operations
+	nonInteractiveEnv := append(os.Environ(),
+		"DEBIAN_FRONTEND=noninteractive",
+		"NEEDRESTART_MODE=a",
+		"NEEDRESTART_SUSPEND=1",
+	)
+
+	switch s.osInfo.Family {
+	case "debian":
+		// Ubuntu/Debian - always update and upgrade to get latest content
+		s.logger.Info("Installing/upgrading OpenSCAP on Debian-based system...")
+
+		// Check if Ubuntu 24.04+ (Noble Numbat)
+		// Lexicographic string compare; valid for Ubuntu's YY.MM scheme.
+		isUbuntu2404Plus := s.osInfo.Name == "ubuntu" && s.osInfo.Version >= "24.04"
+		if isUbuntu2404Plus {
+			s.logger.Info("Ubuntu 24.04+ detected: CIS/STIG content requires ssg-base >= 0.1.76 or Canonical's Ubuntu Security Guide (USG)")
+		}
+
+		// Update package cache first (with timeout)
+		updateCmd := exec.CommandContext(ctx, "apt-get", "update", "-qq")
+		updateCmd.Env = nonInteractiveEnv
+		updateCmd.Run() // Ignore errors on update
+
+		// Build package list - openscap-common is required for Ubuntu 24.04+
+		packages := []string{"openscap-scanner", "openscap-common"}
+
+		// Try to install SSG content packages (may not be available for newer Ubuntu)
+		ssgPackages := []string{"ssg-debderived", "ssg-base"}
+
+		// Install core OpenSCAP packages first
+		// --force-confdef/--force-confold keep existing config files so the
+		// install never blocks on a dpkg conffile prompt.
+		installArgs := append([]string{"install", "-y", "-qq",
+			"-o", "Dpkg::Options::=--force-confdef",
+			"-o", "Dpkg::Options::=--force-confold"}, packages...)
+		installCmd := exec.CommandContext(ctx, "apt-get", installArgs...)
+		installCmd.Env = nonInteractiveEnv
+		output, err := installCmd.CombinedOutput()
+		if err != nil {
+			if ctx.Err() == context.DeadlineExceeded {
+				s.logger.Warn("OpenSCAP installation timed out after 5 minutes")
+				return fmt.Errorf("installation timed out after 5 minutes")
+			}
+			s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to install OpenSCAP core packages")
+			// Truncate output for error message
+			outputStr := string(output)
+			if len(outputStr) > 500 {
+				outputStr = outputStr[:500] + "... (truncated)"
+			}
+			return fmt.Errorf("failed to install OpenSCAP: %w - %s", err, outputStr)
+		}
+		s.logger.Info("OpenSCAP core packages installed successfully")
+
+		// Try to install SSG content packages (best effort - may fail on Ubuntu 24.04+)
+		ssgArgs := append([]string{"install", "-y", "-qq",
+			"-o", "Dpkg::Options::=--force-confdef",
+			"-o", "Dpkg::Options::=--force-confold"}, ssgPackages...)
+		ssgCmd := exec.CommandContext(ctx, "apt-get", ssgArgs...)
+		ssgCmd.Env = nonInteractiveEnv
+		ssgOutput, ssgErr := ssgCmd.CombinedOutput()
+		if ssgErr != nil {
+			// SSG failure is non-fatal: the core scanner was installed above.
+			s.logger.WithField("output", string(ssgOutput)).Warn("SSG content packages not available or failed to install. CIS scanning may have limited functionality.")
+			if isUbuntu2404Plus {
+				s.logger.Info("For Ubuntu 24.04+, consider using Canonical's Ubuntu Security Guide (USG) with Ubuntu Pro for official CIS benchmarks.")
+			}
+		} else {
+			s.logger.Info("SSG content packages installed successfully")
+
+			// Explicitly upgrade to ensure we have the latest SCAP content
+			upgradeCmd := exec.CommandContext(ctx, "apt-get", "upgrade", "-y", "-qq",
+				"-o", "Dpkg::Options::=--force-confdef",
+				"-o", "Dpkg::Options::=--force-confold",
+				"ssg-base", "ssg-debderived")
+			upgradeCmd.Env = nonInteractiveEnv
+			upgradeOutput, upgradeErr := upgradeCmd.CombinedOutput()
+			if upgradeErr != nil {
+				// Non-zero here usually just means "already newest"; log at debug.
+				s.logger.WithField("output", string(upgradeOutput)).Debug("Package upgrade returned non-zero (may already be latest)")
+			} else {
+				s.logger.Info("SCAP content packages upgraded to latest version")
+			}
+		}
+
+	case "rhel":
+		// RHEL/CentOS/Rocky/Alma/Fedora
+		s.logger.Info("Installing/upgrading OpenSCAP on RHEL-based system...")
+		var installCmd *exec.Cmd
+		// Prefer dnf when present; older releases only ship yum.
+		if _, err := exec.LookPath("dnf"); err == nil {
+			installCmd = exec.CommandContext(ctx, "dnf", "install", "-y", "-q", "openscap-scanner", "scap-security-guide")
+		} else {
+			installCmd = exec.CommandContext(ctx, "yum", "install", "-y", "-q", "openscap-scanner", "scap-security-guide")
+		}
+		output, err := installCmd.CombinedOutput()
+		if err != nil {
+			if ctx.Err() == context.DeadlineExceeded {
+				s.logger.Warn("OpenSCAP installation timed out after 5 minutes")
+				return fmt.Errorf("installation timed out after 5 minutes")
+			}
+			s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to install OpenSCAP")
+			outputStr := string(output)
+			if len(outputStr) > 500 {
+				outputStr = outputStr[:500] + "... (truncated)"
+			}
+			return fmt.Errorf("failed to install OpenSCAP: %w - %s", err, outputStr)
+		}
+
+	case "suse":
+		// SLES/openSUSE
+		s.logger.Info("Installing/upgrading OpenSCAP on SUSE-based system...")
+		installCmd := exec.CommandContext(ctx, "zypper", "--non-interactive", "install", "openscap-utils", "scap-security-guide")
+		output, err := installCmd.CombinedOutput()
+		if err != nil {
+			if ctx.Err() == context.DeadlineExceeded {
+				s.logger.Warn("OpenSCAP installation timed out after 5 minutes")
+				return fmt.Errorf("installation timed out after 5 minutes")
+			}
+			s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to install OpenSCAP")
+			outputStr := string(output)
+			if len(outputStr) > 500 {
+				outputStr = outputStr[:500] + "... (truncated)"
+			}
+			return fmt.Errorf("failed to install OpenSCAP: %w - %s", err, outputStr)
+		}
+
+	default:
+		return fmt.Errorf("unsupported OS family: %s (OS: %s)", s.osInfo.Family, s.osInfo.Name)
+	}
+
+	s.logger.Info("OpenSCAP installed/upgraded successfully")
+
+	// Re-check availability after installation
+	s.checkAvailability()
+	if !s.available {
+		return fmt.Errorf("OpenSCAP installed but still not available - content files may be missing")
+	}
+
+	// Check for content version mismatch
+	s.checkContentCompatibility()
+
+	return nil
+}
+
+// checkContentCompatibility warns when the installed SCAP datastream does
+// not appear to correspond to the detected OS release. It only logs; the
+// scanner state is left untouched.
+func (s *OpenSCAPScanner) checkContentCompatibility() {
+	contentFile := s.getContentFile()
+	if contentFile == "" {
+		s.logger.Warn("No SCAP content file found - compliance scans will not work correctly")
+		return
+	}
+
+	baseName := filepath.Base(contentFile)
+	s.logger.WithFields(logrus.Fields{
+		"os_name":      s.osInfo.Name,
+		"os_version":   s.osInfo.Version,
+		"content_file": baseName,
+	}).Debug("Checking SCAP content compatibility")
+
+	// A matching datastream is named like ssg-<os><version-without-dots>-ds.xml
+	// (e.g. ssg-ubuntu2204-ds.xml for Ubuntu 22.04).
+	compactVersion := strings.ReplaceAll(s.osInfo.Version, ".", "")
+	wantPrefix := fmt.Sprintf("ssg-%s%s", s.osInfo.Name, compactVersion)
+	if strings.Contains(baseName, compactVersion) || strings.HasPrefix(baseName, wantPrefix) {
+		return
+	}
+
+	s.logger.WithFields(logrus.Fields{
+		"os_version":   s.osInfo.Version,
+		"content_file": baseName,
+	}).Warn("SCAP content may not match OS version - scan results may show many 'notapplicable' rules. Consider updating ssg-base package.")
+}
+
+// UpgradeSSGContent upgrades the SCAP Security Guide content from GitHub
+// releases, then refreshes scanner state so the new content is picked up.
+func (s *OpenSCAPScanner) UpgradeSSGContent() error {
+	s.logger.Info("Upgrading SCAP Security Guide content from GitHub...")
+
+	err := s.installSSGFromGitHub()
+	if err != nil {
+		s.logger.WithError(err).Warn("Failed to install SSG from GitHub")
+		return err
+	}
+
+	// Re-probe availability and content/OS match with the new files in place.
+	s.checkAvailability()
+	s.checkContentCompatibility()
+
+	// Report the version recorded by the installer's marker file.
+	s.logger.WithField("version", s.getInstalledSSGVersion()).Info("SSG content upgrade completed")
+
+	return nil
+}
+
+// installSSGFromGitHub downloads and installs SSG content from GitHub releases
+//
+// It fetches the pinned release zip from ComplianceAsCode/content, extracts
+// it into a temp directory, copies only the datastream files (ssg-*-ds.xml)
+// into scapContentDir, and records the installed version in a ".ssg-version"
+// marker file read back by getInstalledSSGVersion.
+func (s *OpenSCAPScanner) installSSGFromGitHub() error {
+	// Latest stable version - update this periodically
+	const ssgVersion = "0.1.79"
+	const ssgURL = "https://github.com/ComplianceAsCode/content/releases/download/v" + ssgVersion + "/scap-security-guide-" + ssgVersion + ".zip"
+
+	s.logger.WithFields(map[string]interface{}{
+		"version": ssgVersion,
+		"url":     ssgURL,
+	}).Info("Downloading SSG from GitHub...")
+
+	// Create temp directory
+	tmpDir, err := os.MkdirTemp("", "ssg-upgrade-")
+	if err != nil {
+		return fmt.Errorf("failed to create temp directory: %w", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	zipPath := filepath.Join(tmpDir, "ssg.zip")
+
+	// Download the zip file
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	if err := s.downloadFile(ctx, ssgURL, zipPath); err != nil {
+		return fmt.Errorf("failed to download SSG: %w", err)
+	}
+
+	s.logger.Info("Extracting SSG content...")
+
+	// Extract the zip file
+	extractDir := filepath.Join(tmpDir, "extracted")
+	if err := s.extractZip(zipPath, extractDir); err != nil {
+		return fmt.Errorf("failed to extract SSG: %w", err)
+	}
+
+	// Find the content directory in the extracted files
+	contentSrcDir := filepath.Join(extractDir, "scap-security-guide-"+ssgVersion)
+	if _, err := os.Stat(contentSrcDir); os.IsNotExist(err) {
+		// Try without version suffix
+		// Best effort: a ReadDir error just leaves entries empty, and the
+		// copiedCount == 0 check below reports the overall failure.
+		entries, _ := os.ReadDir(extractDir)
+		for _, e := range entries {
+			if e.IsDir() && strings.HasPrefix(e.Name(), "scap-security-guide") {
+				contentSrcDir = filepath.Join(extractDir, e.Name())
+				break
+			}
+		}
+	}
+
+	// Ensure target directory exists
+	targetDir := scapContentDir
+	if err := os.MkdirAll(targetDir, 0755); err != nil {
+		return fmt.Errorf("failed to create content directory: %w", err)
+	}
+
+	// Copy all XML files (datastream files) to the target directory
+	s.logger.WithField("target", targetDir).Info("Installing SSG content files...")
+
+	xmlFiles, err := filepath.Glob(filepath.Join(contentSrcDir, "*.xml"))
+	if err != nil {
+		return fmt.Errorf("failed to find XML files: %w", err)
+	}
+
+	if len(xmlFiles) == 0 {
+		// Try looking in subdirectories
+		xmlFiles, _ = filepath.Glob(filepath.Join(contentSrcDir, "*", "*.xml"))
+	}
+
+	copiedCount := 0
+	for _, src := range xmlFiles {
+		baseName := filepath.Base(src)
+		// Only copy datastream files (ssg-*-ds.xml)
+		if strings.HasPrefix(baseName, "ssg-") && strings.HasSuffix(baseName, "-ds.xml") {
+			dst := filepath.Join(targetDir, baseName)
+			// A single failed copy is logged but not fatal; success of the
+			// whole install is judged by copiedCount below.
+			if err := s.copyFile(src, dst); err != nil {
+				s.logger.WithError(err).WithField("file", baseName).Warn("Failed to copy content file")
+			} else {
+				copiedCount++
+			}
+		}
+	}
+
+	if copiedCount == 0 {
+		return fmt.Errorf("no SSG content files were installed")
+	}
+
+	s.logger.WithField("files_installed", copiedCount).Info("SSG content files installed successfully")
+
+	// Create a version marker file
+	// Best effort: if this write fails, getInstalledSSGVersion simply
+	// returns "" later; the content itself is already installed.
+	versionFile := filepath.Join(targetDir, ".ssg-version")
+	os.WriteFile(versionFile, []byte(ssgVersion+"\n"), 0644)
+
+	return nil
+}
+
+// downloadFile downloads a file from url to destPath.
+//
+// The request honors ctx for cancellation; the client timeout is a backstop
+// covering the full body transfer. On a copy failure the partially written
+// file is removed, and the final Close error is propagated so buffered-write
+// (flush) failures are not silently lost.
+func (s *OpenSCAPScanner) downloadFile(ctx context.Context, url, destPath string) error {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return err
+	}
+
+	client := &http.Client{
+		Timeout: 5 * time.Minute,
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("HTTP error: %s", resp.Status)
+	}
+
+	out, err := os.Create(destPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(out, resp.Body); err != nil {
+		out.Close()
+		os.Remove(destPath) // drop the partial download
+		return err
+	}
+	// Close explicitly so a failed flush surfaces as an error.
+	return out.Close()
+}
+
+// extractZip extracts a zip archive into destDir, rejecting entries whose
+// resolved path would escape destDir (ZipSlip).
+//
+// Directory-creation errors are now propagated (they were previously
+// ignored for directory entries). Per-entry file handling is delegated to
+// extractZipEntry so defers close each entry promptly.
+func (s *OpenSCAPScanner) extractZip(zipPath, destDir string) error {
+	r, err := zip.OpenReader(zipPath)
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	if err := os.MkdirAll(destDir, 0755); err != nil {
+		return err
+	}
+
+	cleanDest := filepath.Clean(destDir) + string(os.PathSeparator)
+	for _, f := range r.File {
+		fpath := filepath.Join(destDir, f.Name)
+
+		// Check for ZipSlip vulnerability
+		if !strings.HasPrefix(fpath, cleanDest) {
+			return fmt.Errorf("invalid file path: %s", fpath)
+		}
+
+		if f.FileInfo().IsDir() {
+			if err := os.MkdirAll(fpath, f.Mode()); err != nil {
+				return err
+			}
+			continue
+		}
+
+		if err := os.MkdirAll(filepath.Dir(fpath), 0755); err != nil {
+			return err
+		}
+
+		if err := s.extractZipEntry(f, fpath); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// extractZipEntry writes a single archive entry to fpath, ensuring both the
+// output file and the entry reader are closed before returning.
+func (s *OpenSCAPScanner) extractZipEntry(f *zip.File, fpath string) error {
+	outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	rc, err := f.Open()
+	if err != nil {
+		return err
+	}
+	defer rc.Close()
+
+	_, err = io.Copy(outFile, rc)
+	return err
+}
+
+// copyFile copies src to dst with mode 0644.
+//
+// The destination is closed explicitly (not deferred) so that close/flush
+// errors — which would otherwise be silently discarded — propagate to the
+// caller; a copy or chmod failure still closes the file before returning.
+func (s *OpenSCAPScanner) copyFile(src, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(out, in); err != nil {
+		out.Close()
+		return err
+	}
+	if err := out.Chmod(0644); err != nil {
+		out.Close()
+		return err
+	}
+	return out.Close()
+}
+
+// getInstalledSSGVersion reads the version recorded by installSSGFromGitHub
+// in the ".ssg-version" marker file; it returns "" when no marker exists.
+func (s *OpenSCAPScanner) getInstalledSSGVersion() string {
+	raw, err := os.ReadFile(filepath.Join(scapContentDir, ".ssg-version"))
+	if err != nil {
+		return ""
+	}
+	return strings.TrimSpace(string(raw))
+}
+
+// checkAvailability probes for the oscap binary, records its version string,
+// and confirms matching SCAP content exists; it sets s.available (and
+// s.version on success) accordingly.
+func (s *OpenSCAPScanner) checkAvailability() {
+	s.available = false
+
+	path, err := exec.LookPath(oscapBinary)
+	if err != nil {
+		s.logger.Debug("OpenSCAP binary not found")
+		return
+	}
+	s.logger.WithField("path", path).Debug("Found OpenSCAP binary")
+
+	out, err := exec.Command(oscapBinary, "--version").Output()
+	if err != nil {
+		s.logger.WithError(err).Debug("Failed to get OpenSCAP version")
+		return
+	}
+
+	// The first line of "--version" output carries the version string.
+	if lines := strings.Split(string(out), "\n"); len(lines) > 0 {
+		s.version = strings.TrimSpace(lines[0])
+	}
+
+	contentFile := s.getContentFile()
+	if contentFile == "" {
+		s.logger.Debug("No SCAP content files found")
+		return
+	}
+
+	s.available = true
+	s.logger.WithFields(logrus.Fields{
+		"version": s.version,
+		"content": contentFile,
+	}).Debug("OpenSCAP is available")
+}
+
+// detectOS parses the os-release file into a ComplianceOSInfo.
+//
+// Name comes from ID, Version from VERSION_ID, and Family from ID_LIKE when
+// present, falling back to a mapping from known ID values. On any read
+// failure a (possibly partial) zero-value info is returned.
+func (s *OpenSCAPScanner) detectOS() models.ComplianceOSInfo {
+	info := models.ComplianceOSInfo{}
+
+	file, err := os.Open(osReleasePath)
+	if err != nil {
+		s.logger.WithError(err).Debug("Failed to open os-release")
+		return info
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) != 2 {
+			continue
+		}
+		key := parts[0]
+		value := strings.Trim(parts[1], "\"")
+
+		switch key {
+		case "ID":
+			info.Name = value
+		case "VERSION_ID":
+			info.Version = value
+		case "ID_LIKE":
+			// Determine family from ID_LIKE
+			if strings.Contains(value, "debian") {
+				info.Family = "debian"
+			} else if strings.Contains(value, "rhel") || strings.Contains(value, "fedora") {
+				info.Family = "rhel"
+			} else if strings.Contains(value, "suse") {
+				info.Family = "suse"
+			}
+		}
+	}
+	// Surface (but tolerate) scanner errors so a truncated parse is visible.
+	if err := scanner.Err(); err != nil {
+		s.logger.WithError(err).Debug("Error reading os-release")
+	}
+
+	// Set family from ID if not set from ID_LIKE
+	if info.Family == "" {
+		switch info.Name {
+		case "ubuntu", "debian":
+			info.Family = "debian"
+		case "rhel", "centos", "rocky", "alma", "fedora":
+			info.Family = "rhel"
+		case "sles", "opensuse", "opensuse-leap":
+			info.Family = "suse"
+		}
+	}
+
+	return info
+}
+
+// getContentFile returns the path of the best-matching SCAP datastream for
+// the detected OS, or "" when none is installed (or the OS is unknown).
+func (s *OpenSCAPScanner) getContentFile() string {
+	if s.osInfo.Name == "" {
+		return ""
+	}
+
+	compact := strings.ReplaceAll(s.osInfo.Version, ".", "")
+	major := strings.Split(s.osInfo.Version, ".")[0]
+
+	// Most specific first: full version, major version, then bare OS name.
+	candidates := []string{
+		fmt.Sprintf("ssg-%s%s-ds.xml", s.osInfo.Name, compact),
+		fmt.Sprintf("ssg-%s%s-ds.xml", s.osInfo.Name, major),
+		fmt.Sprintf("ssg-%s-ds.xml", s.osInfo.Name),
+	}
+	for _, name := range candidates {
+		full := filepath.Join(scapContentDir, name)
+		if _, err := os.Stat(full); err == nil {
+			return full
+		}
+	}
+
+	// Fall back to any installed datastream whose name starts with the OS name.
+	glob := filepath.Join(scapContentDir, fmt.Sprintf("ssg-%s*-ds.xml", s.osInfo.Name))
+	if matches, err := filepath.Glob(glob); err == nil && len(matches) > 0 {
+		return matches[0]
+	}
+
+	return ""
+}
+
+// GetAvailableProfiles returns the CIS profile names that have a mapping for
+// the detected OS. An empty (non-nil) slice is returned when the scanner is
+// unavailable or nothing matches.
+func (s *OpenSCAPScanner) GetAvailableProfiles() []string {
+	names := make([]string, 0)
+
+	if !s.available {
+		return names
+	}
+
+	// A profile is offered when its mapping contains the detected OS name.
+	for name, byOS := range profileMappings {
+		if _, ok := byOS[s.osInfo.Name]; ok {
+			names = append(names, name)
+		}
+	}
+
+	return names
+}
+
+// getProfileID resolves a short profile name to the full XCCDF profile ID
+// for the detected OS. Names already in XCCDF form ("xccdf_" prefix) pass
+// through unchanged; unknown names resolve to "".
+func (s *OpenSCAPScanner) getProfileID(profileName string) string {
+	if strings.HasPrefix(profileName, "xccdf_") {
+		return profileName
+	}
+
+	osProfiles, ok := profileMappings[profileName]
+	if !ok {
+		return ""
+	}
+	// Missing OS key yields the zero value "" — same as "not available".
+	return osProfiles[s.osInfo.Name]
+}
+
+// RunScan executes an OpenSCAP scan (legacy method - calls RunScanWithOptions with defaults)
+func (s *OpenSCAPScanner) RunScan(ctx context.Context, profileName string) (*models.ComplianceScan, error) {
+	opts := models.ComplianceScanOptions{ProfileID: profileName}
+	return s.RunScanWithOptions(ctx, &opts)
+}
+
+// RunScanWithOptions executes an OpenSCAP scan with configurable options
+//
+// Options drive optional oscap flags: EnableRemediation (--remediate),
+// RuleID (--rule), FetchRemoteResources, TailoringFile, and OutputFormat
+// "arf" (--results-arf to a temp file). Exit codes 1/2 from oscap mean rule
+// failures and are treated as success; the parsed ComplianceScan is returned
+// with timing, status, and RemediationApplied filled in.
+func (s *OpenSCAPScanner) RunScanWithOptions(ctx context.Context, options *models.ComplianceScanOptions) (*models.ComplianceScan, error) {
+	if !s.available {
+		return nil, fmt.Errorf("OpenSCAP is not available")
+	}
+
+	startTime := time.Now()
+
+	contentFile := s.getContentFile()
+	if contentFile == "" {
+		return nil, fmt.Errorf("no SCAP content file found for %s %s", s.osInfo.Name, s.osInfo.Version)
+	}
+
+	profileID := s.getProfileID(options.ProfileID)
+	if profileID == "" {
+		return nil, fmt.Errorf("profile %s not available for %s", options.ProfileID, s.osInfo.Name)
+	}
+
+	// Create temp file for results
+	resultsFile, err := os.CreateTemp("", "oscap-results-*.xml")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create temp file: %w", err)
+	}
+	resultsPath := resultsFile.Name()
+	resultsFile.Close()
+	defer os.Remove(resultsPath)
+
+	// Create temp file for OVAL results (contains detailed check data)
+	// NOTE(review): ovalResultsPath is created and cleaned up but never
+	// passed to oscap — the bare "--oval-results" flag below takes no path
+	// argument. Confirm whether this temp file is still needed.
+	ovalResultsFile, err := os.CreateTemp("", "oscap-oval-*.xml")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create oval temp file: %w", err)
+	}
+	ovalResultsPath := ovalResultsFile.Name()
+	ovalResultsFile.Close()
+	defer os.Remove(ovalResultsPath)
+
+	// Build command arguments
+	args := []string{
+		"xccdf", "eval",
+		"--profile", profileID,
+		"--results", resultsPath,
+		"--oval-results", // Generate detailed OVAL results with actual values
+	}
+
+	// Add optional arguments based on options
+	if options.EnableRemediation {
+		args = append(args, "--remediate")
+		s.logger.Info("Remediation enabled - will attempt to fix failed rules")
+	}
+
+	// Add rule filter for single rule remediation
+	if options.RuleID != "" {
+		args = append(args, "--rule", options.RuleID)
+		s.logger.WithField("rule_id", options.RuleID).Info("Filtering scan to single rule")
+	}
+
+	if options.FetchRemoteResources {
+		args = append(args, "--fetch-remote-resources")
+	}
+
+	if options.TailoringFile != "" {
+		args = append(args, "--tailoring-file", options.TailoringFile)
+	}
+
+	// Add ARF output if requested
+	// Best effort: if the ARF temp file cannot be created, the scan simply
+	// runs without --results-arf.
+	if options.OutputFormat == "arf" {
+		arfFile, err := os.CreateTemp("", "oscap-arf-*.xml")
+		if err == nil {
+			arfPath := arfFile.Name()
+			arfFile.Close()
+			defer os.Remove(arfPath)
+			args = append(args, "--results-arf", arfPath)
+		}
+	}
+
+	// Add content file last
+	args = append(args, contentFile)
+
+	s.logger.WithFields(logrus.Fields{
+		"profile":     options.ProfileID,
+		"profile_id":  profileID,
+		"content":     contentFile,
+		"remediation": options.EnableRemediation,
+	}).Info("Starting OpenSCAP scan (this may take several minutes)...")
+
+	// Run oscap with progress logging
+	cmd := exec.CommandContext(ctx, oscapBinary, args...)
+
+	// Start a goroutine to log progress every 30 seconds
+	// The goroutine exits when done is closed right after CombinedOutput
+	// returns, so it cannot outlive this call.
+	done := make(chan struct{})
+	go func() {
+		ticker := time.NewTicker(30 * time.Second)
+		defer ticker.Stop()
+		elapsed := 0
+		for {
+			select {
+			case <-done:
+				return
+			case <-ticker.C:
+				elapsed += 30
+				s.logger.WithField("elapsed_seconds", elapsed).Info("OpenSCAP scan still running...")
+			}
+		}
+	}()
+
+	output, err := cmd.CombinedOutput()
+	close(done)
+
+	// oscap returns non-zero exit code if there are failures, which is expected
+	// We only care about actual execution errors
+	if err != nil {
+		if ctx.Err() != nil {
+			return nil, fmt.Errorf("scan cancelled or timed out: %w", ctx.Err())
+		}
+		if exitErr, ok := err.(*exec.ExitError); ok {
+			// Exit code 1 or 2 means there were rule failures - this is normal
+			if exitErr.ExitCode() != 2 && exitErr.ExitCode() != 1 {
+				// Truncate output for error message (keep first 500 chars)
+				outputStr := string(output)
+				if len(outputStr) > 500 {
+					outputStr = outputStr[:500] + "... (truncated)"
+				}
+				return nil, fmt.Errorf("oscap execution failed (exit code %d): %s", exitErr.ExitCode(), outputStr)
+			}
+		} else {
+			// Other errors (like signal killed)
+			return nil, fmt.Errorf("oscap execution failed: %w", err)
+		}
+	}
+
+	// Parse results (pass oscap output and content file for metadata)
+	scan, err := s.parseResults(resultsPath, contentFile, options.ProfileID, string(output))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse results: %w", err)
+	}
+
+	scan.StartedAt = startTime
+	now := time.Now()
+	scan.CompletedAt = &now
+	scan.Status = "completed"
+	scan.RemediationApplied = options.EnableRemediation
+
+	return scan, nil
+}
+
+// GenerateRemediationScript generates a shell script at outputPath that
+// fixes the failed rules recorded in an XCCDF results file.
+func (s *OpenSCAPScanner) GenerateRemediationScript(ctx context.Context, resultsPath string, outputPath string) error {
+	if !s.available {
+		return fmt.Errorf("OpenSCAP is not available")
+	}
+
+	s.logger.WithField("output", outputPath).Debug("Generating remediation script")
+
+	cmd := exec.CommandContext(ctx, oscapBinary,
+		"xccdf", "generate", "fix",
+		"--template", "urn:xccdf:fix:script:sh",
+		"--output", outputPath,
+		resultsPath,
+	)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		// Keep only the first 500 chars of oscap's output in the error.
+		outputStr := string(output)
+		if len(outputStr) > 500 {
+			outputStr = outputStr[:500] + "... (truncated)"
+		}
+		return fmt.Errorf("failed to generate remediation script: %w - %s", err, outputStr)
+	}
+
+	s.logger.WithField("output", outputPath).Info("Remediation script generated")
+	return nil
+}
+
+// RunOfflineRemediation applies fixes from a previous scan result via
+// `oscap xccdf remediate`. Exit codes 1-2 (some remediations failed) are
+// tolerated; anything higher is an error.
+func (s *OpenSCAPScanner) RunOfflineRemediation(ctx context.Context, resultsPath string) error {
+	if !s.available {
+		return fmt.Errorf("OpenSCAP is not available")
+	}
+
+	contentFile := s.getContentFile()
+	if contentFile == "" {
+		return fmt.Errorf("no SCAP content file found")
+	}
+
+	s.logger.WithField("results", resultsPath).Info("Running offline remediation")
+
+	cmd := exec.CommandContext(ctx, oscapBinary, "xccdf", "remediate", "--results", resultsPath, contentFile)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		exitErr, isExit := err.(*exec.ExitError)
+		if !isExit {
+			return fmt.Errorf("remediation execution failed: %w", err)
+		}
+		if exitErr.ExitCode() > 2 {
+			// Keep only the first 500 chars of oscap's output in the error.
+			outputStr := string(output)
+			if len(outputStr) > 500 {
+				outputStr = outputStr[:500] + "... (truncated)"
+			}
+			return fmt.Errorf("remediation failed (exit code %d): %s", exitErr.ExitCode(), outputStr)
+		}
+	}
+
+	s.logger.Info("Offline remediation completed")
+	return nil
+}
+
+// XCCDF result structures for parsing
+// NOTE(review): declared for xml-tag-based decoding, but the visible
+// parsing code (parseResults) uses regexes instead — confirm these types
+// are still referenced elsewhere before removing.
+type xccdfTestResult struct {
+	XMLName xml.Name `xml:"TestResult"`
+	Rules []xccdfRuleResult `xml:"rule-result"`
+}
+
+// xccdfRuleResult mirrors one rule-result element: the rule it refers to
+// (idref attribute) and its outcome text.
+type xccdfRuleResult struct {
+	IDRef string `xml:"idref,attr"`
+	Result string `xml:"result"`
+}
+
+// ruleMetadata holds extracted rule information from the benchmark
+// datastream. All fields are plain text pulled from the XCCDF Rule element
+// and may be empty when the benchmark omits them.
+type ruleMetadata struct {
+	Title string // human-readable rule title
+	Description string // what the rule checks
+	Rationale string // why the rule matters
+	Severity string // severity attribute as given in the benchmark
+	Remediation string // fix text associated with the rule
+	Section string // group/section the rule belongs to
+}
+
+// parseResults parses the XCCDF results file produced by `oscap xccdf eval`
+// and assembles a ComplianceScan with per-rule results, counters, and score.
+//
+// Rule metadata (title/description/severity/remediation) is extracted from
+// the results file when it embeds the benchmark, otherwise from the
+// datastream at contentFile. oscapOutput (the tool's stdout) supplies
+// fallback failure details for rules whose result carries no message.
+//
+// Parsing is regex-based rather than full XML decoding; the element
+// patterns below were reconstructed to match the <rule-result>, <result>
+// and <message> elements (the previous literals were corrupted and could
+// not match real XCCDF output).
+func (s *OpenSCAPScanner) parseResults(resultsPath string, contentFile string, profileName string, oscapOutput string) (*models.ComplianceScan, error) {
+	data, err := os.ReadFile(resultsPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read results: %w", err)
+	}
+
+	resultsContent := string(data)
+
+	scan := &models.ComplianceScan{
+		ProfileName: profileName,
+		ProfileType: "openscap",
+		Results:     make([]models.ComplianceResult, 0),
+	}
+
+	// Load the benchmark datastream (ssg-*-ds.xml): it contains the Rule
+	// definitions with title, description, etc.
+	benchmarkContent := ""
+	if contentFile != "" {
+		if benchmarkData, err := os.ReadFile(contentFile); err == nil {
+			benchmarkContent = string(benchmarkData)
+			s.logger.WithField("content_file", contentFile).Debug("Loaded benchmark file for metadata extraction")
+		} else {
+			s.logger.WithError(err).Warn("Failed to read benchmark file for metadata")
+		}
+	}
+
+	s.logger.WithFields(map[string]interface{}{
+		"results_content_len":   len(resultsContent),
+		"benchmark_content_len": len(benchmarkContent),
+	}).Info("Starting metadata extraction")
+
+	// Prefer metadata embedded in the results file, then the benchmark.
+	ruleMetadataMap := s.extractRuleMetadata(resultsContent)
+	s.logger.WithField("rules_from_results", len(ruleMetadataMap)).Info("Extracted metadata from results file")
+
+	if len(ruleMetadataMap) == 0 && benchmarkContent != "" {
+		s.logger.Info("No metadata in results file, extracting from benchmark datastream")
+		ruleMetadataMap = s.extractRuleMetadata(benchmarkContent)
+		s.logger.WithField("rules_from_benchmark", len(ruleMetadataMap)).Info("Extracted metadata from benchmark file")
+	}
+
+	// Parse oscap stdout for rule-specific failure details.
+	ruleOutputMap := s.parseOscapOutput(oscapOutput)
+
+	// Reconstructed XML element patterns: capture each rule-result block,
+	// then pull its result status and optional message detail.
+	ruleResultPattern := regexp.MustCompile(`<rule-result[^>]*idref="([^"]+)"[^>]*>([\s\S]*?)</rule-result>`)
+	resultPattern := regexp.MustCompile(`<result[^>]*>([^<]+)</result>`)
+	messagePattern := regexp.MustCompile(`<message[^>]*>([^<]+)</message>`)
+
+	matches := ruleResultPattern.FindAllStringSubmatch(resultsContent, -1)
+
+	for _, match := range matches {
+		if len(match) >= 3 {
+			ruleID := match[1]
+			ruleResultContent := match[2]
+
+			// Extract result status
+			resultMatch := resultPattern.FindStringSubmatch(ruleResultContent)
+			if len(resultMatch) < 2 {
+				continue
+			}
+			result := strings.TrimSpace(resultMatch[1])
+			status := s.mapResult(result)
+
+			// Extract message if present (contains specific check output for failures)
+			var finding string
+			messageMatch := messagePattern.FindStringSubmatch(ruleResultContent)
+			if len(messageMatch) >= 2 {
+				finding = strings.TrimSpace(messageMatch[1])
+			}
+
+			// If no finding from XML, try to get from oscap output
+			if finding == "" && status == "fail" {
+				if outputInfo, ok := ruleOutputMap[ruleID]; ok {
+					finding = outputInfo
+				}
+			}
+
+			// Update counters
+			switch status {
+			case "pass":
+				scan.Passed++
+			case "fail":
+				scan.Failed++
+			case "warn":
+				scan.Warnings++
+			case "skip":
+				scan.Skipped++
+			case "notapplicable":
+				scan.NotApplicable++
+			}
+			scan.TotalRules++
+
+			// Get metadata from embedded benchmark
+			metadata := ruleMetadataMap[ruleID]
+
+			// Use extracted title or fall back to generated one
+			title := metadata.Title
+			if title == "" {
+				title = s.extractTitle(ruleID)
+			}
+
+			// Extract actual/expected from finding if possible
+			actual, expected := s.parseActualExpected(finding, metadata.Description)
+
+			scan.Results = append(scan.Results, models.ComplianceResult{
+				RuleID:      ruleID,
+				Title:       title,
+				Status:      status,
+				Finding:     finding,
+				Actual:      actual,
+				Expected:    expected,
+				Description: metadata.Description,
+				Severity:    metadata.Severity,
+				Remediation: metadata.Remediation,
+				Section:     metadata.Section,
+			})
+
+			// Debug logging for result assembly (only for failed rules to reduce noise)
+			if status == "fail" {
+				s.logger.WithFields(map[string]interface{}{
+					"rule_id":         ruleID,
+					"title":           title,
+					"status":          status,
+					"has_description": len(metadata.Description) > 0,
+					"desc_len":        len(metadata.Description),
+					"has_remediation": len(metadata.Remediation) > 0,
+					"severity":        metadata.Severity,
+				}).Debug("Assembled failed rule result")
+			}
+		}
+	}
+
+	// Score = passed / (total - notapplicable - skipped), as a percentage.
+	if scan.TotalRules > 0 {
+		applicable := scan.TotalRules - scan.NotApplicable - scan.Skipped
+		if applicable > 0 {
+			scan.Score = float64(scan.Passed) / float64(applicable) * 100
+		}
+	}
+
+	return scan, nil
+}
+
+// parseOscapOutput extracts per-rule failure details from oscap's stdout.
+//
+// oscap prints lines like "Title\trule_id\tresult"; lines following a rule
+// line that look like check output (paths, key=value, key: value) are
+// attached to that rule. Returns a map of rule ID -> "; "-joined details.
+func (s *OpenSCAPScanner) parseOscapOutput(output string) map[string]string {
+	ruleInfo := make(map[string]string)
+
+	// Compile once, outside the loop (previously re-compiled on every line).
+	rulePattern := regexp.MustCompile(`(xccdf_org\.ssgproject\.content_rule_[^\s\t]+)`)
+
+	lines := strings.Split(output, "\n")
+
+	var currentRuleID string
+	var currentDetails []string
+
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+
+		// Check if this is a rule result line (contains rule ID pattern)
+		if strings.Contains(line, "xccdf_org.ssgproject.content_rule_") {
+			// Save previous rule's details if any
+			if currentRuleID != "" && len(currentDetails) > 0 {
+				ruleInfo[currentRuleID] = strings.Join(currentDetails, "; ")
+			}
+
+			// Extract rule ID from line
+			if match := rulePattern.FindStringSubmatch(line); len(match) >= 2 {
+				currentRuleID = match[1]
+				currentDetails = nil
+
+				// Check if line contains failure indicator and additional info
+				if strings.Contains(strings.ToLower(line), "fail") {
+					// Look for any additional info after the status
+					parts := strings.Split(line, "\t")
+					if len(parts) > 3 {
+						currentDetails = append(currentDetails, strings.Join(parts[3:], " "))
+					}
+				}
+			}
+		} else if currentRuleID != "" && !strings.HasPrefix(line, "Title") {
+			// This might be additional detail for the current rule
+			// Capture lines that look like check output (often start with paths or values)
+			if strings.HasPrefix(line, "/") || strings.Contains(line, "=") || strings.Contains(line, ":") {
+				currentDetails = append(currentDetails, line)
+			}
+		}
+	}
+
+	// Save last rule's details
+	if currentRuleID != "" && len(currentDetails) > 0 {
+		ruleInfo[currentRuleID] = strings.Join(currentDetails, "; ")
+	}
+
+	return ruleInfo
+}
+
+// parseActualExpected attempts to extract actual and expected values from
+// finding text using common XCCDF phrasings. Either return may be "".
+//
+// The final capture groups are now greedy (or single-token): the previous
+// lazy `([^'"]+?)` followed only by an optional quote matched exactly one
+// character, truncating every captured value.
+func (s *OpenSCAPScanner) parseActualExpected(finding string, description string) (actual, expected string) {
+	if finding == "" {
+		return "", ""
+	}
+
+	// Common patterns in XCCDF findings:
+	// "expected X but found Y"
+	// "value is X, should be Y"
+	// "X is set to Y"
+
+	// Pattern: "expected ... but found ..." — trailing group greedy so the
+	// full found-value (possibly multi-word) is captured.
+	pattern1 := regexp.MustCompile(`(?i)expected\s+['"]?([^'"]+?)['"]?\s+but\s+found\s+['"]?([^'"]+)['"]?`)
+	if match := pattern1.FindStringSubmatch(finding); len(match) >= 3 {
+		return strings.TrimSpace(match[2]), strings.TrimSpace(match[1]) // actual, expected
+	}
+
+	// Pattern: "found ... expected ..."
+	pattern2 := regexp.MustCompile(`(?i)found\s+['"]?([^'"]+?)['"]?\s+expected\s+['"]?([^'"]+)['"]?`)
+	if match := pattern2.FindStringSubmatch(finding); len(match) >= 3 {
+		return strings.TrimSpace(match[1]), strings.TrimSpace(match[2])
+	}
+
+	// Pattern: "is set to X" (actual value) — capture one token so trailing
+	// prose ("..., should be Y") is not swallowed.
+	pattern3 := regexp.MustCompile(`(?i)is\s+set\s+to\s+['"]?([^'"\s]+)['"]?`)
+	if match := pattern3.FindStringSubmatch(finding); len(match) >= 2 {
+		actual = match[1]
+	}
+
+	// Pattern: "should be X" (expected value)
+	pattern4 := regexp.MustCompile(`(?i)should\s+be\s+['"]?([^'"\s]+)['"]?`)
+	if match := pattern4.FindStringSubmatch(finding); len(match) >= 2 {
+		expected = match[1]
+	}
+
+	// Pattern: "value X" or "= X"
+	pattern5 := regexp.MustCompile(`(?:value|=)\s*['"]?(\S+)['"]?`)
+	if actual == "" {
+		if match := pattern5.FindStringSubmatch(finding); len(match) >= 2 {
+			actual = match[1]
+		}
+	}
+
+	return actual, expected
+}
+
+// extractRuleMetadata extracts rule definitions from the embedded benchmark in XCCDF results
+func (s *OpenSCAPScanner) extractRuleMetadata(content string) map[string]ruleMetadata {
+ metadata := make(map[string]ruleMetadata)
+
+ // Extract Rule elements using a more robust approach:
+ // 1. Find all Rule opening tags and their positions
+ // 2. Find the corresponding closing tag (handling nesting)
+ // 3. Extract attributes and content separately
+
+ // Pattern to match Rule opening tags with any attributes
+ // Namespace prefix can be like "xccdf-1.2:" so we need to include dots and hyphens
+ ruleOpenPattern := regexp.MustCompile(`<([a-zA-Z0-9._-]*:)?Rule\s+([^>]*)>`)
+ idPattern := regexp.MustCompile(`id="([^"]+)"`)
+ severityAttrPattern := regexp.MustCompile(`severity="([^"]*)"`)
+
+ // Patterns for child elements (handle any namespace prefix including dots like xccdf-1.2:)
+ titlePattern := regexp.MustCompile(`<([a-zA-Z0-9._-]*:)?title[^>]*>([^<]+)([a-zA-Z0-9._-]*:)?title>`)
+ descPattern := regexp.MustCompile(`<([a-zA-Z0-9._-]*:)?description[^>]*>([\s\S]*?)([a-zA-Z0-9._-]*:)?description>`)
+ rationalePattern := regexp.MustCompile(`<([a-zA-Z0-9._-]*:)?rationale[^>]*>([\s\S]*?)([a-zA-Z0-9._-]*:)?rationale>`)
+ // For fix elements, prefer shell script remediation (system="urn:xccdf:fix:script:sh")
+ fixShPattern := regexp.MustCompile(`<([a-zA-Z0-9._-]*:)?fix[^>]*system="urn:xccdf:fix:script:sh"[^>]*>([\s\S]*?)([a-zA-Z0-9._-]*:)?fix>`)
+ fixPattern := regexp.MustCompile(`<([a-zA-Z0-9._-]*:)?fix[^>]*>([\s\S]*?)([a-zA-Z0-9._-]*:)?fix>`)
+ fixTextPattern := regexp.MustCompile(`<([a-zA-Z0-9._-]*:)?fixtext[^>]*>([\s\S]*?)([a-zA-Z0-9._-]*:)?fixtext>`)
+
+ // Find all Rule opening tags
+ openMatches := ruleOpenPattern.FindAllStringSubmatchIndex(content, -1)
+
+ for _, openMatch := range openMatches {
+ if len(openMatch) < 6 {
+ continue
+ }
+
+ tagStart := openMatch[0]
+ tagEnd := openMatch[1]
+ nsPrefix := ""
+ if openMatch[2] >= 0 && openMatch[3] > openMatch[2] {
+ nsPrefix = content[openMatch[2]:openMatch[3]]
+ }
+ attributes := content[openMatch[4]:openMatch[5]]
+
+ // Extract id from attributes
+ idMatch := idPattern.FindStringSubmatch(attributes)
+ if len(idMatch) < 2 {
+ continue
+ }
+ ruleID := idMatch[1]
+
+ // Find the closing tag for this Rule element
+ // Build the closing tag pattern based on namespace prefix
+ closingTag := "" + nsPrefix + "Rule>"
+ openingTag := "<" + nsPrefix + "Rule"
+
+ // Find closing tag, accounting for potential nested Rule elements
+ ruleContent := ""
+ depth := 1
+ searchStart := tagEnd
+ for depth > 0 && searchStart < len(content) {
+ nextOpen := strings.Index(content[searchStart:], openingTag)
+ nextClose := strings.Index(content[searchStart:], closingTag)
+
+ if nextClose == -1 {
+ // No closing tag found
+ break
+ }
+
+ if nextOpen != -1 && nextOpen < nextClose {
+ // Found another opening tag before closing
+ depth++
+ searchStart = searchStart + nextOpen + len(openingTag)
+ } else {
+ // Found closing tag
+ depth--
+ if depth == 0 {
+ ruleContent = content[tagEnd : searchStart+nextClose]
+ }
+ searchStart = searchStart + nextClose + len(closingTag)
+ }
+ }
+
+ // If nesting approach failed, try simpler non-greedy match
+ if ruleContent == "" {
+ // Look for closing tag within reasonable distance (500KB limit per rule)
+ endIdx := tagStart + 500000
+ if endIdx > len(content) {
+ endIdx = len(content)
+ }
+ searchContent := content[tagEnd:endIdx]
+ closeIdx := strings.Index(searchContent, closingTag)
+ if closeIdx != -1 {
+ ruleContent = searchContent[:closeIdx]
+ }
+ }
+
+ if ruleContent == "" {
+ s.logger.WithField("rule_id", ruleID).Debug("Could not find Rule content")
+ continue
+ }
+
+ meta := ruleMetadata{}
+
+ // Extract severity from attributes
+ if sevMatch := severityAttrPattern.FindStringSubmatch(attributes); len(sevMatch) >= 2 {
+ meta.Severity = sevMatch[1]
+ }
+
+ // Extract title - use the inner text (group 2)
+ if titleMatch := titlePattern.FindStringSubmatch(ruleContent); len(titleMatch) >= 3 {
+ meta.Title = s.cleanXMLText(titleMatch[2])
+ }
+
+ // Extract description - use the inner text (group 2)
+ if descMatch := descPattern.FindStringSubmatch(ruleContent); len(descMatch) >= 3 {
+ meta.Description = s.cleanXMLText(descMatch[2])
+ }
+
+ // Extract rationale (append to description if present)
+ if ratMatch := rationalePattern.FindStringSubmatch(ruleContent); len(ratMatch) >= 3 {
+ rationale := s.cleanXMLText(ratMatch[2])
+ if rationale != "" {
+ if meta.Description != "" {
+ meta.Description = meta.Description + "\n\nRationale: " + rationale
+ } else {
+ meta.Description = "Rationale: " + rationale
+ }
+ }
+ }
+
+ // Extract fix/remediation - prefer shell script fix, then any fix, then fixtext
+ if fixShMatch := fixShPattern.FindStringSubmatch(ruleContent); len(fixShMatch) >= 3 {
+ meta.Remediation = s.cleanXMLText(fixShMatch[2])
+ }
+ if meta.Remediation == "" {
+ if fixMatch := fixPattern.FindStringSubmatch(ruleContent); len(fixMatch) >= 3 {
+ meta.Remediation = s.cleanXMLText(fixMatch[2])
+ }
+ }
+ if meta.Remediation == "" {
+ if fixTextMatch := fixTextPattern.FindStringSubmatch(ruleContent); len(fixTextMatch) >= 3 {
+ meta.Remediation = s.cleanXMLText(fixTextMatch[2])
+ }
+ }
+
+ // Extract section from rule ID (e.g., "1.1.1" from rule naming)
+ meta.Section = s.extractSection(ruleID)
+
+ metadata[ruleID] = meta
+
+ // Debug logging for metadata extraction verification
+ s.logger.WithFields(map[string]interface{}{
+ "rule_id": ruleID,
+ "title": meta.Title,
+ "title_len": len(meta.Title),
+ "desc_len": len(meta.Description),
+ "desc_preview": truncateString(meta.Description, 100),
+ "remediation_len": len(meta.Remediation),
+ "severity": meta.Severity,
+ "section": meta.Section,
+ }).Debug("Extracted rule metadata")
+ }
+
+ // Count rules with actual content for debugging
+ withTitle := 0
+ withDesc := 0
+ withRemediation := 0
+ for _, m := range metadata {
+ if m.Title != "" {
+ withTitle++
+ }
+ if m.Description != "" {
+ withDesc++
+ }
+ if m.Remediation != "" {
+ withRemediation++
+ }
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "total_rules": len(metadata),
+ "with_title": withTitle,
+ "with_description": withDesc,
+ "with_remediation": withRemediation,
+ }).Info("Extracted rule metadata summary")
+
+ return metadata
+}
+
+// cleanXMLText removes HTML/XML tags and cleans up whitespace
+func (s *OpenSCAPScanner) cleanXMLText(text string) string {
+ // Remove HTML tags
+ htmlPattern := regexp.MustCompile(`<[^>]+>`)
+ text = htmlPattern.ReplaceAllString(text, " ")
+
+ // Decode common HTML entities
+ text = strings.ReplaceAll(text, "<", "<")
+ text = strings.ReplaceAll(text, ">", ">")
+ text = strings.ReplaceAll(text, "&", "&")
+ text = strings.ReplaceAll(text, """, "\"")
+ text = strings.ReplaceAll(text, "
", "\n")
+ text = strings.ReplaceAll(text, "
", "\n")
+
+ // Clean up whitespace
+ whitespacePattern := regexp.MustCompile(`\s+`)
+ text = whitespacePattern.ReplaceAllString(text, " ")
+
+ return strings.TrimSpace(text)
+}
+
// truncateString shortens s to at most maxLen bytes, appending "..." when
// truncation occurs. Intended for keeping log fields readable.
func truncateString(s string, maxLen int) string {
	if len(s) > maxLen {
		return s[:maxLen] + "..."
	}
	return s
}
+
+// extractSection attempts to extract a section number from the rule ID
+func (s *OpenSCAPScanner) extractSection(ruleID string) string {
+ // Look for patterns like "1_1_1" or "1.1.1" in the rule ID
+ sectionPattern := regexp.MustCompile(`(\d+[_\.]\d+(?:[_\.]\d+)*)`)
+ if match := sectionPattern.FindString(ruleID); match != "" {
+ // Convert underscores to dots for display
+ return strings.ReplaceAll(match, "_", ".")
+ }
+ return ""
+}
+
+// mapResult maps XCCDF result to our status
+func (s *OpenSCAPScanner) mapResult(result string) string {
+ switch strings.ToLower(result) {
+ case "pass":
+ return "pass"
+ case "fail":
+ return "fail"
+ case "error":
+ return "fail"
+ case "informational":
+ return "warn"
+ case "notselected", "notchecked":
+ return "skip"
+ case "notapplicable":
+ return "notapplicable"
+ default:
+ return "skip"
+ }
+}
+
+// extractTitle extracts a readable title from a rule ID
+func (s *OpenSCAPScanner) extractTitle(ruleID string) string {
+ // Remove prefix and convert underscores to spaces
+ title := strings.TrimPrefix(ruleID, "xccdf_org.ssgproject.content_rule_")
+ title = strings.ReplaceAll(title, "_", " ")
+
+ // Capitalize first letter
+ if len(title) > 0 {
+ title = strings.ToUpper(title[:1]) + title[1:]
+ }
+
+ return title
+}
+
// Cleanup removes OpenSCAP and related packages
// Note: This is optional - packages can be left installed if desired
//
// Removal is best-effort: package-manager failures are logged and swallowed
// (nil is returned); only a timeout is reported as an error. On success the
// scanner's cached availability/version state is reset.
func (s *OpenSCAPScanner) Cleanup() error {
	if !s.available {
		s.logger.Debug("OpenSCAP not installed, nothing to clean up")
		return nil
	}

	s.logger.Info("Removing OpenSCAP packages...")

	// Create context with timeout for package operations
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()

	// Environment for non-interactive apt operations
	// (prevents dpkg config prompts and needrestart dialogs from blocking).
	nonInteractiveEnv := append(os.Environ(),
		"DEBIAN_FRONTEND=noninteractive",
		"NEEDRESTART_MODE=a",
		"NEEDRESTART_SUSPEND=1",
	)

	var removeCmd *exec.Cmd

	// Pick the removal command per OS family; package names differ by distro.
	switch s.osInfo.Family {
	case "debian":
		removeCmd = exec.CommandContext(ctx, "apt-get", "remove", "-y", "-qq",
			"-o", "Dpkg::Options::=--force-confdef",
			"-o", "Dpkg::Options::=--force-confold",
			"openscap-scanner", "ssg-debderived", "ssg-base")
		removeCmd.Env = nonInteractiveEnv
	case "rhel":
		// Prefer dnf (RHEL 8+); fall back to yum on older systems.
		if _, err := exec.LookPath("dnf"); err == nil {
			removeCmd = exec.CommandContext(ctx, "dnf", "remove", "-y", "-q", "openscap-scanner", "scap-security-guide")
		} else {
			removeCmd = exec.CommandContext(ctx, "yum", "remove", "-y", "-q", "openscap-scanner", "scap-security-guide")
		}
	case "suse":
		removeCmd = exec.CommandContext(ctx, "zypper", "--non-interactive", "remove", "openscap-utils", "scap-security-guide")
	default:
		s.logger.Debug("Unknown OS family, skipping package removal")
		return nil
	}

	output, err := removeCmd.CombinedOutput()
	if err != nil {
		if ctx.Err() == context.DeadlineExceeded {
			s.logger.Warn("OpenSCAP removal timed out after 3 minutes")
			return fmt.Errorf("removal timed out after 3 minutes")
		}
		s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to remove OpenSCAP packages")
		// Don't return error - cleanup is best-effort
		return nil
	}

	s.logger.Info("OpenSCAP packages removed successfully")
	// Reset cached state so subsequent availability checks reflect reality.
	s.available = false
	s.version = ""

	return nil
}
diff --git a/internal/integrations/compliance/oscap_docker.go b/internal/integrations/compliance/oscap_docker.go
new file mode 100644
index 0000000..944d089
--- /dev/null
+++ b/internal/integrations/compliance/oscap_docker.go
@@ -0,0 +1,386 @@
+package compliance
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strings"
+ "time"
+
+ "patchmon-agent/pkg/models"
+
+ "github.com/sirupsen/logrus"
+)
+
const (
	// oscapDockerBinary is the name of the oscap-docker CLI resolved on PATH.
	oscapDockerBinary = "oscap-docker"
)
+
// OscapDockerScanner handles Docker image/container vulnerability scanning using oscap-docker
type OscapDockerScanner struct {
	logger    *logrus.Logger // structured logger shared with the agent
	available bool           // cached result of checkAvailability
}
+
+// NewOscapDockerScanner creates a new oscap-docker scanner
+func NewOscapDockerScanner(logger *logrus.Logger) *OscapDockerScanner {
+ s := &OscapDockerScanner{
+ logger: logger,
+ }
+ s.checkAvailability()
+ return s
+}
+
// IsAvailable returns whether oscap-docker is available
// (i.e. the result cached by the most recent checkAvailability run).
func (s *OscapDockerScanner) IsAvailable() bool {
	return s.available
}
+
+// checkAvailability checks if oscap-docker tool is available
+func (s *OscapDockerScanner) checkAvailability() {
+ // Check if oscap-docker binary exists
+ path, err := exec.LookPath(oscapDockerBinary)
+ if err != nil {
+ s.logger.Debug("oscap-docker binary not found")
+ s.available = false
+ return
+ }
+
+ s.logger.WithField("path", path).Debug("oscap-docker binary found")
+
+ // Check if docker is also available (required for oscap-docker)
+ _, err = exec.LookPath("docker")
+ if err != nil {
+ s.logger.Debug("Docker binary not found - oscap-docker requires Docker")
+ s.available = false
+ return
+ }
+
+ // Check if Docker daemon is running
+ cmd := exec.Command("docker", "info")
+ if err := cmd.Run(); err != nil {
+ s.logger.Debug("Docker daemon not responding - oscap-docker requires Docker")
+ s.available = false
+ return
+ }
+
+ s.available = true
+ s.logger.Debug("oscap-docker is available for container image scanning")
+}
+
// ScanImage scans a Docker image for CVEs using oscap-docker.
//
// Returns a completed ComplianceScan even when vulnerabilities are found
// (oscap-docker exits non-zero in that case but still produces parseable
// output); an error is returned only on cancellation or when no output
// could be produced at all.
func (s *OscapDockerScanner) ScanImage(ctx context.Context, imageName string) (*models.ComplianceScan, error) {
	if !s.available {
		return nil, fmt.Errorf("oscap-docker is not available")
	}

	if imageName == "" {
		return nil, fmt.Errorf("image name is required")
	}

	startTime := time.Now()

	s.logger.WithField("image", imageName).Info("Scanning Docker image for CVEs...")

	// Run oscap-docker image-cve
	// This will:
	// 1. Attach to the Docker image
	// 2. Determine OS variant/version
	// 3. Download applicable CVE stream (OVAL data)
	// 4. Run vulnerability scan
	cmd := exec.CommandContext(ctx, oscapDockerBinary, "image-cve", imageName)
	output, err := cmd.CombinedOutput()

	if err != nil {
		if ctx.Err() != nil {
			return nil, fmt.Errorf("scan cancelled: %w", ctx.Err())
		}
		// oscap-docker exits non-zero when vulnerabilities are found
		// Check if we got any output to parse
		if len(output) == 0 {
			return nil, fmt.Errorf("oscap-docker failed: %w", err)
		}
		s.logger.WithError(err).Debug("oscap-docker exited with error, parsing output for results")
	}

	// Parse the output and stamp timing/status onto the scan record.
	scan := s.parseImageCveOutput(string(output), imageName)
	scan.StartedAt = startTime
	now := time.Now()
	scan.CompletedAt = &now
	scan.Status = "completed"

	s.logger.WithFields(logrus.Fields{
		"image":           imageName,
		"vulnerabilities": scan.Failed,
		"total_cves":      scan.TotalRules,
	}).Info("Docker image CVE scan completed")

	return scan, nil
}
+
// ScanContainer scans a running container for CVEs.
//
// Mirrors ScanImage but invokes "oscap-docker container-cve", which evaluates
// the container rather than its underlying image. Non-zero exit with output
// is treated as "vulnerabilities found", not as a hard failure.
func (s *OscapDockerScanner) ScanContainer(ctx context.Context, containerName string) (*models.ComplianceScan, error) {
	if !s.available {
		return nil, fmt.Errorf("oscap-docker is not available")
	}

	if containerName == "" {
		return nil, fmt.Errorf("container name is required")
	}

	startTime := time.Now()

	s.logger.WithField("container", containerName).Info("Scanning Docker container for CVEs...")

	// Run oscap-docker container-cve
	cmd := exec.CommandContext(ctx, oscapDockerBinary, "container-cve", containerName)
	output, err := cmd.CombinedOutput()

	if err != nil {
		if ctx.Err() != nil {
			return nil, fmt.Errorf("scan cancelled: %w", ctx.Err())
		}
		if len(output) == 0 {
			return nil, fmt.Errorf("oscap-docker failed: %w", err)
		}
		s.logger.WithError(err).Debug("oscap-docker exited with error, parsing output for results")
	}

	// Parse the output and stamp timing/status onto the scan record.
	scan := s.parseContainerCveOutput(string(output), containerName)
	scan.StartedAt = startTime
	now := time.Now()
	scan.CompletedAt = &now
	scan.Status = "completed"

	s.logger.WithFields(logrus.Fields{
		"container":       containerName,
		"vulnerabilities": scan.Failed,
		"total_cves":      scan.TotalRules,
	}).Info("Docker container CVE scan completed")

	return scan, nil
}
+
+// ScanAllImages scans all Docker images on the system
+func (s *OscapDockerScanner) ScanAllImages(ctx context.Context) ([]*models.ComplianceScan, error) {
+ if !s.available {
+ return nil, fmt.Errorf("oscap-docker is not available")
+ }
+
+ // Get list of all images
+ cmd := exec.CommandContext(ctx, "docker", "images", "--format", "{{.Repository}}:{{.Tag}}")
+ output, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("failed to list Docker images: %w", err)
+ }
+
+ var scans []*models.ComplianceScan
+ scanner := bufio.NewScanner(strings.NewReader(string(output)))
+
+ for scanner.Scan() {
+ imageName := strings.TrimSpace(scanner.Text())
+ if imageName == "" || imageName == ":" {
+ continue
+ }
+
+ scan, err := s.ScanImage(ctx, imageName)
+ if err != nil {
+ s.logger.WithError(err).WithField("image", imageName).Warn("Failed to scan image, skipping")
+ continue
+ }
+ scans = append(scans, scan)
+ }
+
+ return scans, nil
+}
+
+// parseImageCveOutput parses oscap-docker image-cve output
+func (s *OscapDockerScanner) parseImageCveOutput(output string, imageName string) *models.ComplianceScan {
+ scan := &models.ComplianceScan{
+ ProfileName: fmt.Sprintf("Docker Image CVE Scan: %s", imageName),
+ ProfileType: "oscap-docker",
+ Results: make([]models.ComplianceResult, 0),
+ }
+
+ // Parse CVE results
+ // oscap-docker output format varies, but typically includes lines like:
+ // CVE-2021-44228 - Critical - Description...
+ // Or in OVAL format with true/false results
+
+ // Pattern for CVE lines
+ cvePattern := regexp.MustCompile(`(CVE-\d{4}-\d+)`)
+ severityPattern := regexp.MustCompile(`(?i)(critical|high|important|medium|moderate|low)`)
+
+ lines := strings.Split(output, "\n")
+ seenCVEs := make(map[string]bool)
+
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if line == "" {
+ continue
+ }
+
+ // Look for CVE identifiers
+ cveMatches := cvePattern.FindStringSubmatch(line)
+ if len(cveMatches) > 0 {
+ cveID := cveMatches[1]
+
+ // Skip duplicates
+ if seenCVEs[cveID] {
+ continue
+ }
+ seenCVEs[cveID] = true
+
+ // Determine severity
+ severity := "medium" // default
+ severityMatch := severityPattern.FindStringSubmatch(line)
+ if len(severityMatch) > 0 {
+ severity = strings.ToLower(severityMatch[1])
+ // Normalize severity names
+ switch severity {
+ case "important":
+ severity = "high"
+ case "moderate":
+ severity = "medium"
+ }
+ }
+
+ scan.Results = append(scan.Results, models.ComplianceResult{
+ RuleID: cveID,
+ Title: line,
+ Status: "fail", // CVEs found are failures
+ Severity: severity,
+ Section: "Container Vulnerabilities",
+ })
+ scan.Failed++
+ scan.TotalRules++
+ }
+ }
+
+ // If no CVEs found, mark as passed
+ if scan.TotalRules == 0 {
+ scan.Passed = 1
+ scan.TotalRules = 1
+ scan.Score = 100.0
+ scan.Results = append(scan.Results, models.ComplianceResult{
+ RuleID: "no-cves",
+ Title: "No known CVEs found in image",
+ Status: "pass",
+ Section: "Container Vulnerabilities",
+ })
+ } else {
+ // Calculate score based on severity
+ // Critical = 10 points, High = 5 points, Medium = 2 points, Low = 1 point
+ totalPenalty := 0
+ for _, result := range scan.Results {
+ switch result.Severity {
+ case "critical":
+ totalPenalty += 10
+ case "high":
+ totalPenalty += 5
+ case "medium":
+ totalPenalty += 2
+ case "low":
+ totalPenalty += 1
+ }
+ }
+ // Score decreases with more/worse vulnerabilities
+ // Max penalty of 100 points
+ if totalPenalty > 100 {
+ totalPenalty = 100
+ }
+ scan.Score = float64(100 - totalPenalty)
+ if scan.Score < 0 {
+ scan.Score = 0
+ }
+ }
+
+ return scan
+}
+
+// parseContainerCveOutput parses oscap-docker container-cve output
+func (s *OscapDockerScanner) parseContainerCveOutput(output string, containerName string) *models.ComplianceScan {
+ // Reuse image parsing logic - output format is similar
+ scan := s.parseImageCveOutput(output, containerName)
+ scan.ProfileName = fmt.Sprintf("Docker Container CVE Scan: %s", containerName)
+ return scan
+}
+
+// GetVersion returns the oscap-docker version
+func (s *OscapDockerScanner) GetVersion() string {
+ if !s.available {
+ return ""
+ }
+
+ cmd := exec.Command(oscapDockerBinary, "--version")
+ output, err := cmd.Output()
+ if err != nil {
+ return ""
+ }
+
+ return strings.TrimSpace(string(output))
+}
+
// EnsureInstalled checks if oscap-docker is installed and attempts to install if not.
//
// Installation is only attempted on RHEL/Fedora-family systems (via the
// openscap-containers package); Ubuntu/Debian and Alpine are rejected because
// oscap-docker depends on the 'atomic' package that is unavailable there.
// Returns nil only when oscap-docker is ready to use afterwards.
func (s *OscapDockerScanner) EnsureInstalled() error {
	// Re-check availability
	s.checkAvailability()

	if s.available {
		s.logger.Debug("oscap-docker is already available")
		return nil
	}

	s.logger.Info("Attempting to install oscap-docker...")

	// Detect package manager and install (bounded to 5 minutes).
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Try different package managers with appropriate packages
	if _, err := exec.LookPath("apt-get"); err == nil {
		// Debian/Ubuntu - oscap-docker requires the 'atomic' package which is NOT available on Ubuntu
		// oscap-docker is primarily a Red Hat/Fedora tool that depends on atomic
		// See: https://answers.launchpad.net/ubuntu/+source/openscap/+question/242354
		s.logger.Warn("oscap-docker is not supported on Ubuntu/Debian - it requires the 'atomic' package which is only available on RHEL/Fedora")
		return fmt.Errorf("oscap-docker is not available on Ubuntu/Debian (requires 'atomic' package)")
	} else if _, err := exec.LookPath("dnf"); err == nil {
		// RHEL 8+/Fedora - oscap-docker is available via openscap-containers
		s.logger.Info("Installing openscap-containers for RHEL/Fedora...")
		installCmd := exec.CommandContext(ctx, "dnf", "install", "-y", "openscap-containers")
		output, err := installCmd.CombinedOutput()
		if err != nil {
			s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to install openscap-containers")
			return fmt.Errorf("failed to install openscap-containers: %w", err)
		}
	} else if _, err := exec.LookPath("yum"); err == nil {
		// RHEL 7/CentOS 7
		s.logger.Info("Installing openscap-containers for CentOS/RHEL 7...")
		installCmd := exec.CommandContext(ctx, "yum", "install", "-y", "openscap-containers")
		output, err := installCmd.CombinedOutput()
		if err != nil {
			s.logger.WithError(err).WithField("output", string(output)).Warn("Failed to install openscap-containers")
			return fmt.Errorf("failed to install openscap-containers: %w", err)
		}
	} else if _, err := exec.LookPath("apk"); err == nil {
		// Alpine - oscap-docker is not available
		s.logger.Warn("oscap-docker is not available on Alpine Linux")
		return fmt.Errorf("oscap-docker is not available on Alpine Linux")
	} else {
		return fmt.Errorf("no supported package manager found")
	}

	// Re-check availability after install
	s.checkAvailability()
	if !s.available {
		s.logger.Warn("oscap-docker binary not found after installation - it may not be available for this OS version")
		return fmt.Errorf("oscap-docker still not available after installation attempt")
	}

	s.logger.Info("oscap-docker installed successfully")
	return nil
}
diff --git a/internal/integrations/manager.go b/internal/integrations/manager.go
index cadf826..574c07e 100644
--- a/internal/integrations/manager.go
+++ b/internal/integrations/manager.go
@@ -15,10 +15,10 @@ import (
// Manager orchestrates integration discovery and data collection
type Manager struct {
- integrations []Integration
- logger *logrus.Logger
- mu sync.RWMutex
- isEnabledChecker func(string) bool // Optional function to check if integration is enabled
+ integrations []Integration
+ logger *logrus.Logger
+ mu sync.RWMutex
+ isEnabledChecker func(string) bool // Optional function to check if integration is enabled
}
// NewManager creates a new integration manager
diff --git a/internal/network/network.go b/internal/network/network.go
index 63c15ba..9b100d2 100644
--- a/internal/network/network.go
+++ b/internal/network/network.go
@@ -160,7 +160,7 @@ func (m *Manager) getNetworkInterfaces() []models.NetworkInterface {
if ipnet, ok := addr.(*net.IPNet); ok {
var family string
var gateway string
-
+
if ipnet.IP.To4() != nil {
family = constants.IPFamilyIPv4
gateway = ipv4Gateway
@@ -243,7 +243,7 @@ func (m *Manager) getInterfaceGateway(interfaceName string, ipv6 bool) string {
// Use ip route (defaults to IPv4)
cmd = exec.Command("ip", "route", "show", "dev", interfaceName)
}
-
+
output, err := cmd.Output()
if err == nil {
lines := strings.Split(string(output), "\n")
diff --git a/internal/repositories/apk.go b/internal/repositories/apk.go
index d14ed70..0be33b4 100644
--- a/internal/repositories/apk.go
+++ b/internal/repositories/apk.go
@@ -59,7 +59,7 @@ func (m *APKManager) GetRepositories() ([]models.Repository, error) {
// findRepoFile locates the APK repositories file
func (m *APKManager) findRepoFile() (string, error) {
repoFile := "/etc/apk/repositories"
-
+
// Check if file exists
if _, err := os.Stat(repoFile); err != nil {
if os.IsNotExist(err) {
@@ -91,7 +91,7 @@ func (m *APKManager) parseRepoFile(filename string) ([]models.Repository, error)
// Regex to match repository URL pattern
// Matches: http://... or https://... followed by path
urlRegex := regexp.MustCompile(`^(@\S+\s+)?(https?://[^\s]+)`)
-
+
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
@@ -161,7 +161,7 @@ func (m *APKManager) parseRepoLine(line string, urlRegex *regexp.Regexp) *models
func (m *APKManager) extractDistributionAndComponents(url string) (distribution, components string) {
// Split URL by "/"
parts := strings.Split(url, "/")
-
+
// Find "alpine" in the path
alpineIndex := -1
for i, part := range parts {
@@ -229,4 +229,3 @@ func (m *APKManager) isValidRepoURL(url string) bool {
func (m *APKManager) isSecureURL(url string) bool {
return strings.HasPrefix(url, "https://")
}
-
diff --git a/internal/utils/offset.go b/internal/utils/offset.go
index c9efd7e..609c965 100644
--- a/internal/utils/offset.go
+++ b/internal/utils/offset.go
@@ -39,5 +39,3 @@ func hashString(s string) uint64 {
h.Write([]byte(s))
return h.Sum64()
}
-
-
diff --git a/internal/utils/timezone.go b/internal/utils/timezone.go
index 3881437..cf2d61d 100644
--- a/internal/utils/timezone.go
+++ b/internal/utils/timezone.go
@@ -22,19 +22,19 @@ func GetTimezone() string {
// Defaults to UTC if not set or invalid
func GetTimezoneLocation() *time.Location {
tz := GetTimezone()
-
+
// Handle UTC explicitly
if tz == "UTC" || tz == "Etc/UTC" {
return time.UTC
}
-
+
// Try to load the timezone
loc, err := time.LoadLocation(tz)
if err != nil {
// Fallback to UTC if timezone is invalid
return time.UTC
}
-
+
return loc
}
@@ -56,40 +56,3 @@ func GetCurrentTimeUTC() time.Time {
func FormatTimeISO(t time.Time) string {
return t.Format(time.RFC3339)
}
-
-// ParseTime parses a time string and returns a time.Time
-// Handles various formats including RFC3339 and Unix timestamps
-func ParseTime(timeStr string) (time.Time, error) {
- // Try RFC3339 first (ISO 8601)
- if t, err := time.Parse(time.RFC3339, timeStr); err == nil {
- return t, nil
- }
-
- // Try RFC3339Nano
- if t, err := time.Parse(time.RFC3339Nano, timeStr); err == nil {
- return t, nil
- }
-
- // Try common formats
- formats := []string{
- "2006-01-02T15:04:05",
- "2006-01-02 15:04:05",
- "2006-01-02T15:04:05Z07:00",
- }
-
- for _, format := range formats {
- if t, err := time.Parse(format, timeStr); err == nil {
- return t, nil
- }
- }
-
- // If all else fails, return zero time
- return time.Time{}, nil
-}
-
-// FormatTimeForDisplay formats a time for display in the configured timezone
-func FormatTimeForDisplay(t time.Time) string {
- loc := GetTimezoneLocation()
- return t.In(loc).Format("2006-01-02T15:04:05")
-}
-
diff --git a/internal/utils/utils.go b/internal/utils/utils.go
index 4347782..2da4144 100644
--- a/internal/utils/utils.go
+++ b/internal/utils/utils.go
@@ -3,9 +3,18 @@ package utils
import (
"fmt"
"net"
+ "os"
+ "strings"
"time"
)
// IsProductionEnvironment checks if the agent is running in a production environment
// by inspecting the PATCHMON_ENV environment variable (case-insensitive match
// on "production" or "prod").
// SECURITY: Used to block insecure configurations in production
func IsProductionEnvironment() bool {
	switch strings.ToLower(os.Getenv("PATCHMON_ENV")) {
	case "production", "prod":
		return true
	default:
		return false
	}
}
+
// TcpPing performs a simple TCP connection test to the specified host and port
func TcpPing(host, port string) bool {
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", host, port), 5*time.Second)
diff --git a/internal/version/version.go b/internal/version/version.go
index 504fe5f..4a1f71f 100644
--- a/internal/version/version.go
+++ b/internal/version/version.go
@@ -1,4 +1,4 @@
package version
// Version represents the current version of the patchmon-agent
-const Version = "1.4.0"
+const Version = "1.5.55"
diff --git a/patchmon-agent b/patchmon-agent
new file mode 100755
index 0000000..2014df8
Binary files /dev/null and b/patchmon-agent differ
diff --git a/pkg/models/compliance.go b/pkg/models/compliance.go
new file mode 100644
index 0000000..ff14a78
--- /dev/null
+++ b/pkg/models/compliance.go
@@ -0,0 +1,85 @@
+package models
+
+import "time"
+
// ComplianceRule represents a compliance rule definition.
type ComplianceRule struct {
	RuleID      string `json:"rule_id"`
	Title       string `json:"title"`
	Description string `json:"description,omitempty"`
	Section     string `json:"section,omitempty"`  // benchmark section, e.g. "1.1.1"
	Severity    string `json:"severity,omitempty"` // low, medium, high, critical
	Remediation string `json:"remediation,omitempty"`
}
+
// ComplianceResult represents a single rule evaluation result.
type ComplianceResult struct {
	RuleID      string `json:"rule_ref"` // Backend expects rule_ref, not rule_id
	Title       string `json:"title"`
	Status      string `json:"status"` // pass, fail, warn, skip, notapplicable, error
	Finding     string `json:"finding,omitempty"`  // raw finding text from the scanner
	Actual      string `json:"actual,omitempty"`   // Actual value found on the system
	Expected    string `json:"expected,omitempty"` // Expected/required value
	Section     string `json:"section,omitempty"`
	Description string `json:"description,omitempty"`
	Severity    string `json:"severity,omitempty"` // low, medium, high, critical
	Remediation string `json:"remediation,omitempty"`
}
+
// ComplianceScan represents results of a compliance scan, including aggregate
// counters and the per-rule results.
type ComplianceScan struct {
	ProfileName        string             `json:"profile_name"`
	ProfileType        string             `json:"profile_type"` // openscap, docker-bench
	Status             string             `json:"status"`       // completed, failed, in_progress
	Score              float64            `json:"score"`        // 0-100
	TotalRules         int                `json:"total_rules"`
	Passed             int                `json:"passed"`
	Failed             int                `json:"failed"`
	Warnings           int                `json:"warnings"`
	Skipped            int                `json:"skipped"`
	NotApplicable      int                `json:"not_applicable"`
	StartedAt          time.Time          `json:"started_at"`
	CompletedAt        *time.Time         `json:"completed_at,omitempty"` // nil while in progress
	Results            []ComplianceResult `json:"results"`
	Error              string             `json:"error,omitempty"`
	RemediationApplied bool               `json:"remediation_applied,omitempty"`
	RemediationCount   int                `json:"remediation_count,omitempty"` // Number of rules remediated
}
+
// ComplianceData represents all compliance-related data reported by the agent.
type ComplianceData struct {
	Scans       []ComplianceScan      `json:"scans"`
	OSInfo      ComplianceOSInfo      `json:"os_info"`
	ScannerInfo ComplianceScannerInfo `json:"scanner_info"`
}
+
// ComplianceOSInfo represents OS information for compliance context.
type ComplianceOSInfo struct {
	Family  string `json:"family"`  // debian, rhel, suse
	Name    string `json:"name"`    // ubuntu, rocky, debian
	Version string `json:"version"` // 22.04, 9, 12
}
+
// ComplianceScannerInfo represents scanner availability information for the
// three supported backends (OpenSCAP, docker-bench, oscap-docker).
type ComplianceScannerInfo struct {
	OpenSCAPAvailable    bool     `json:"openscap_available"`
	OpenSCAPVersion      string   `json:"openscap_version,omitempty"`
	DockerBenchAvailable bool     `json:"docker_bench_available"`
	OscapDockerAvailable bool     `json:"oscap_docker_available"`
	AvailableProfiles    []string `json:"available_profiles,omitempty"`
}
+
// CompliancePayload represents the payload sent to the compliance endpoint.
// It embeds ComplianceData and adds host identification fields.
type CompliancePayload struct {
	ComplianceData
	Hostname     string `json:"hostname"`
	MachineID    string `json:"machine_id"`
	AgentVersion string `json:"agent_version"`
}
+
// ComplianceResponse represents the response from the compliance endpoint.
type ComplianceResponse struct {
	Message       string `json:"message"`
	ScanID        string `json:"scan_id,omitempty"` // server-assigned identifier, when provided
	ScansReceived int    `json:"scans_received"`
}
diff --git a/pkg/models/models.go b/pkg/models/models.go
index a411235..9afa3ef 100644
--- a/pkg/models/models.go
+++ b/pkg/models/models.go
@@ -106,6 +106,8 @@ type PingResponse struct {
Message string `json:"message"`
Timestamp string `json:"timestamp"`
FriendlyName string `json:"friendlyName"`
+ AgentStartup bool `json:"agentStartup,omitempty"`
+ Integrations map[string]bool `json:"integrations,omitempty"` // Server-side integration enable states
CrontabUpdate *CrontabUpdateInfo `json:"crontabUpdate,omitempty"`
 }
 
@@ -165,6 +167,72 @@ type IntegrationStatusResponse struct {
Integrations map[string]bool `json:"integrations"`
 }
 
+// IntegrationSetupStatus represents the setup status of an integration
+type IntegrationSetupStatus struct {
+ Integration string `json:"integration"`
+ Enabled bool `json:"enabled"`
+ Status string `json:"status"` // "ready", "installing", "removing", "error"
+ Message string `json:"message"`
+ Components map[string]string `json:"components,omitempty"` // Component name -> status
+ ScannerInfo *ComplianceScannerDetails `json:"scanner_info,omitempty"`
+}
+
+// ComplianceScannerDetails contains detailed OpenSCAP scanner information
+type ComplianceScannerDetails struct {
+ // OpenSCAP info
+ OpenSCAPVersion string `json:"openscap_version,omitempty"`
+ OpenSCAPAvailable bool `json:"openscap_available"`
+
+ // SCAP Content info
+ ContentFile string `json:"content_file,omitempty"`
+ ContentPackage string `json:"content_package,omitempty"` // e.g., "ssg-base 0.1.76"
+ SSGVersion string `json:"ssg_version,omitempty"` // Just the version number (e.g., "0.1.76")
+ SSGMinVersion string `json:"ssg_min_version,omitempty"` // Minimum required version for this OS
+ SSGNeedsUpgrade bool `json:"ssg_needs_upgrade,omitempty"` // True if upgrade is recommended
+ SSGUpgradeMessage string `json:"ssg_upgrade_message,omitempty"` // Message explaining why upgrade is needed
+
+ // Available scan profiles
+ AvailableProfiles []ScanProfileInfo `json:"available_profiles,omitempty"`
+
+ // Docker Bench info
+ DockerBenchAvailable bool `json:"docker_bench_available"`
+ DockerBenchVersion string `json:"docker_bench_version,omitempty"`
+
+ // oscap-docker info (for Docker image CVE scanning)
+ OscapDockerAvailable bool `json:"oscap_docker_available"`
+
+ // OS info for content matching
+ OSName string `json:"os_name,omitempty"`
+ OSVersion string `json:"os_version,omitempty"`
+ OSFamily string `json:"os_family,omitempty"`
+
+ // Content compatibility
+ ContentMismatch bool `json:"content_mismatch,omitempty"`
+ MismatchWarning string `json:"mismatch_warning,omitempty"`
+}
+
+// ScanProfileInfo describes an available scan profile
+type ScanProfileInfo struct {
+ ID string `json:"id"` // Internal ID (e.g., "level1_server") or full XCCDF ID
+ Name string `json:"name"` // Display name (e.g., "CIS Level 1 Server")
+ Description string `json:"description,omitempty"` // Brief description
+ Type string `json:"type"` // "openscap" or "docker-bench"
+ XCCDFId string `json:"xccdf_id,omitempty"` // Full XCCDF profile ID
+ Category string `json:"category,omitempty"` // Category: "cis", "stig", "pci-dss", "hipaa", etc.
+}
+
+// ComplianceScanOptions represents configurable scan options
+type ComplianceScanOptions struct {
+ ProfileID string `json:"profile_id"` // Profile to use for scan
+ RuleID string `json:"rule_id,omitempty"` // Specific rule ID to scan/remediate (for single rule operations)
+ EnableRemediation bool `json:"enable_remediation,omitempty"` // Enable automatic remediation
+ RemediationType string `json:"remediation_type,omitempty"` // "online", "offline", "script"
+ FetchRemoteResources bool `json:"fetch_remote_resources,omitempty"` // Fetch remote OVAL content
+ TailoringFile string `json:"tailoring_file,omitempty"` // Path to tailoring file
+ OutputFormat string `json:"output_format,omitempty"` // "html", "xml", "arf"
+ Timeout int `json:"timeout,omitempty"` // Scan timeout in minutes
+}
+
// Credentials holds API authentication information
type Credentials struct {
APIID string `yaml:"api_id" mapstructure:"api_id"`
@@ -173,13 +241,14 @@ type Credentials struct {
// Config represents agent configuration
type Config struct {
- PatchmonServer string `yaml:"patchmon_server" mapstructure:"patchmon_server"`
- APIVersion string `yaml:"api_version" mapstructure:"api_version"`
- CredentialsFile string `yaml:"credentials_file" mapstructure:"credentials_file"`
- LogFile string `yaml:"log_file" mapstructure:"log_file"`
- LogLevel string `yaml:"log_level" mapstructure:"log_level"`
- SkipSSLVerify bool `yaml:"skip_ssl_verify" mapstructure:"skip_ssl_verify"`
- UpdateInterval int `yaml:"update_interval" mapstructure:"update_interval"` // Interval in minutes
- ReportOffset int `yaml:"report_offset" mapstructure:"report_offset"` // Offset in seconds
- Integrations map[string]bool `yaml:"integrations" mapstructure:"integrations"`
+ PatchmonServer string `yaml:"patchmon_server" mapstructure:"patchmon_server"`
+ APIVersion string `yaml:"api_version" mapstructure:"api_version"`
+ CredentialsFile string `yaml:"credentials_file" mapstructure:"credentials_file"`
+ LogFile string `yaml:"log_file" mapstructure:"log_file"`
+ LogLevel string `yaml:"log_level" mapstructure:"log_level"`
+ SkipSSLVerify bool `yaml:"skip_ssl_verify" mapstructure:"skip_ssl_verify"`
+ UpdateInterval int `yaml:"update_interval" mapstructure:"update_interval"` // Interval in minutes
+ ReportOffset int `yaml:"report_offset" mapstructure:"report_offset"` // Offset in seconds
+ Integrations map[string]bool `yaml:"integrations" mapstructure:"integrations"`
+ ComplianceOnDemandOnly bool `yaml:"compliance_on_demand_only" mapstructure:"compliance_on_demand_only"` // Skip compliance during scheduled reports
}