Stage 1 of the new CIDRs / site setup

2025-11-19 13:39:27 -06:00
parent 4a4c33a10b
commit 034f146fa1
16 changed files with 3998 additions and 609 deletions


@@ -16,7 +16,7 @@ from sqlalchemy.orm import Session, joinedload
 from web.models import (
     Scan, ScanSite, ScanIP, ScanPort, ScanService as ScanServiceModel,
-    ScanCertificate, ScanTLSVersion
+    ScanCertificate, ScanTLSVersion, Site, ScanSiteAssociation
 )
 from web.utils.pagination import paginate, PaginatedResult
 from web.utils.validators import validate_config_file, validate_scan_status
@@ -41,8 +41,9 @@ class ScanService:
         """
         self.db = db_session
 
-    def trigger_scan(self, config_file: str, triggered_by: str = 'manual',
-                     schedule_id: Optional[int] = None, scheduler=None) -> int:
+    def trigger_scan(self, config_file: str = None, config_id: int = None,
+                     triggered_by: str = 'manual', schedule_id: Optional[int] = None,
+                     scheduler=None) -> int:
         """
         Trigger a new scan.
 
@@ -50,7 +51,8 @@ class ScanService:
         queues the scan for background execution.
 
         Args:
-            config_file: Path to YAML configuration file
+            config_file: Path to YAML configuration file (legacy, optional)
+            config_id: Database config ID (preferred, optional)
             triggered_by: Source that triggered scan (manual, scheduled, api)
             schedule_id: Optional schedule ID if triggered by schedule
             scheduler: Optional SchedulerService instance for queuing background jobs
@@ -59,57 +61,106 @@ class ScanService:
             Scan ID of the created scan
 
         Raises:
-            ValueError: If config file is invalid
+            ValueError: If config is invalid or both/neither config_file and config_id provided
         """
-        # Validate config file
-        is_valid, error_msg = validate_config_file(config_file)
-        if not is_valid:
-            raise ValueError(f"Invalid config file: {error_msg}")
-
-        # Convert config_file to full path if it's just a filename
-        if not config_file.startswith('/'):
-            config_path = f'/app/configs/{config_file}'
-        else:
-            config_path = config_file
-
-        # Load config to get title
-        import yaml
-        with open(config_path, 'r') as f:
-            config = yaml.safe_load(f)
-
-        # Create scan record
-        scan = Scan(
-            timestamp=datetime.utcnow(),
-            status='running',
-            config_file=config_file,
-            title=config.get('title', 'Untitled Scan'),
-            triggered_by=triggered_by,
-            schedule_id=schedule_id,
-            created_at=datetime.utcnow()
-        )
-
-        self.db.add(scan)
-        self.db.commit()
-        self.db.refresh(scan)
-
-        logger.info(f"Scan {scan.id} triggered via {triggered_by}")
-
-        # Queue background job if scheduler provided
-        if scheduler:
-            try:
-                job_id = scheduler.queue_scan(scan.id, config_file)
-                logger.info(f"Scan {scan.id} queued for background execution (job_id={job_id})")
-            except Exception as e:
-                logger.error(f"Failed to queue scan {scan.id}: {str(e)}")
-                # Mark scan as failed if job queuing fails
-                scan.status = 'failed'
-                scan.error_message = f"Failed to queue background job: {str(e)}"
-                self.db.commit()
-                raise
-        else:
-            logger.warning(f"Scan {scan.id} created but not queued (no scheduler provided)")
-
-        return scan.id
+        # Validate that exactly one config source is provided
+        if not (bool(config_file) ^ bool(config_id)):
+            raise ValueError("Must provide exactly one of config_file or config_id")
+
+        # Handle database config
+        if config_id:
+            from web.models import ScanConfig
+
+            # Validate config exists
+            db_config = self.db.query(ScanConfig).filter_by(id=config_id).first()
+            if not db_config:
+                raise ValueError(f"Config with ID {config_id} not found")
+
+            # Create scan record with config_id
+            scan = Scan(
+                timestamp=datetime.utcnow(),
+                status='running',
+                config_id=config_id,
+                title=db_config.title,
+                triggered_by=triggered_by,
+                schedule_id=schedule_id,
+                created_at=datetime.utcnow()
+            )
+
+            self.db.add(scan)
+            self.db.commit()
+            self.db.refresh(scan)
+
+            logger.info(f"Scan {scan.id} triggered via {triggered_by} with config_id={config_id}")
+
+            # Queue background job if scheduler provided
+            if scheduler:
+                try:
+                    job_id = scheduler.queue_scan(scan.id, config_id=config_id)
+                    logger.info(f"Scan {scan.id} queued for background execution (job_id={job_id})")
+                except Exception as e:
+                    logger.error(f"Failed to queue scan {scan.id}: {str(e)}")
+                    # Mark scan as failed if job queuing fails
+                    scan.status = 'failed'
+                    scan.error_message = f"Failed to queue background job: {str(e)}"
+                    self.db.commit()
+                    raise
+            else:
+                logger.warning(f"Scan {scan.id} created but not queued (no scheduler provided)")
+
+            return scan.id
+
+        # Handle legacy YAML config file
+        else:
+            # Validate config file
+            is_valid, error_msg = validate_config_file(config_file)
+            if not is_valid:
+                raise ValueError(f"Invalid config file: {error_msg}")
+
+            # Convert config_file to full path if it's just a filename
+            if not config_file.startswith('/'):
+                config_path = f'/app/configs/{config_file}'
+            else:
+                config_path = config_file
+
+            # Load config to get title
+            import yaml
+            with open(config_path, 'r') as f:
+                config = yaml.safe_load(f)
+
+            # Create scan record
+            scan = Scan(
+                timestamp=datetime.utcnow(),
+                status='running',
+                config_file=config_file,
+                title=config.get('title', 'Untitled Scan'),
+                triggered_by=triggered_by,
+                schedule_id=schedule_id,
+                created_at=datetime.utcnow()
+            )
+
+            self.db.add(scan)
+            self.db.commit()
+            self.db.refresh(scan)
+
+            logger.info(f"Scan {scan.id} triggered via {triggered_by}")
+
+            # Queue background job if scheduler provided
+            if scheduler:
+                try:
+                    job_id = scheduler.queue_scan(scan.id, config_file=config_file)
+                    logger.info(f"Scan {scan.id} queued for background execution (job_id={job_id})")
+                except Exception as e:
+                    logger.error(f"Failed to queue scan {scan.id}: {str(e)}")
+                    # Mark scan as failed if job queuing fails
+                    scan.status = 'failed'
+                    scan.error_message = f"Failed to queue background job: {str(e)}"
+                    self.db.commit()
+                    raise
+            else:
+                logger.warning(f"Scan {scan.id} created but not queued (no scheduler provided)")
+
+            return scan.id
 
     def get_scan(self, scan_id: int) -> Optional[Dict[str, Any]]:
         """
@@ -366,6 +417,34 @@ class ScanService:
             self.db.add(site)
             self.db.flush()  # Get site.id for foreign key
 
+            # Create ScanSiteAssociation if this site exists in the database
+            # This links the scan to reusable site definitions
+            master_site = (
+                self.db.query(Site)
+                .filter(Site.name == site_data['name'])
+                .first()
+            )
+
+            if master_site:
+                # Check if association already exists (avoid duplicates)
+                existing_assoc = (
+                    self.db.query(ScanSiteAssociation)
+                    .filter(
+                        ScanSiteAssociation.scan_id == scan_obj.id,
+                        ScanSiteAssociation.site_id == master_site.id
+                    )
+                    .first()
+                )
+
+                if not existing_assoc:
+                    assoc = ScanSiteAssociation(
+                        scan_id=scan_obj.id,
+                        site_id=master_site.id,
+                        created_at=datetime.utcnow()
+                    )
+                    self.db.add(assoc)
+                    logger.debug(f"Created association between scan {scan_obj.id} and site '{master_site.name}' (id={master_site.id})")
+
             # Process each IP in this site
             for ip_data in site_data.get('ips', []):
                 # Create ScanIP record
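
The association rows added above make it possible to go from a scan back to the reusable site definitions it touched. A minimal sketch of such a lookup, assuming only the `Site` and `ScanSiteAssociation` models referenced in this hunk and a live SQLAlchemy session `db`; the scan ID is hypothetical.

```python
from web.models import Site, ScanSiteAssociation

def sites_for_scan(db, scan_id: int):
    """Return the master Site rows associated with a given scan."""
    return (
        db.query(Site)
        .join(ScanSiteAssociation, ScanSiteAssociation.site_id == Site.id)
        .filter(ScanSiteAssociation.scan_id == scan_id)
        .all()
    )

# Example: names of the reusable site definitions hit by scan 123 (hypothetical ID).
print([s.name for s in sites_for_scan(db, 123)])
```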