First release of open core

This commit is contained in:
t
2026-04-02 10:57:36 -04:00
parent 1c94f12d1c
commit 084c1321fc
101 changed files with 8812 additions and 17 deletions

187
pkg/datastore/auth_db.go Normal file
View File

@@ -0,0 +1,187 @@
package datastore
import (
"context"
"database/sql"
"errors"
"time"
"epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)
// ErrNotFound is a standard error we can use across our handlers
var ErrNotFound = errors.New("record not found")
// CreateUser inserts a new user row and returns the populated domain.User,
// including the auto-incremented ID assigned by SQLite.
func (s *SQLiteStore) CreateUser(ctx context.Context, email, fullName, passwordHash, globalRole string) (*domain.User, error) {
	const insert = `INSERT INTO users (email, full_name, password_hash, global_role) VALUES (?, ?, ?, ?)`
	res, err := s.DB.ExecContext(ctx, insert, email, fullName, passwordHash, globalRole)
	if err != nil {
		return nil, err
	}
	newID, err := res.LastInsertId()
	if err != nil {
		return nil, err
	}
	created := domain.User{
		ID:           int(newID),
		Email:        email,
		FullName:     fullName,
		PasswordHash: passwordHash,
		GlobalRole:   globalRole,
	}
	return &created, nil
}
// GetUserByEmail fetches an active user by email; deactivated accounts are
// treated the same as missing ones so a disabled user cannot log in.
//
// On a miss, the returned error matches BOTH ErrNotFound (the package-wide
// sentinel every other getter here returns) and sql.ErrNoRows (what this
// method historically returned), so existing callers using errors.Is with
// either sentinel keep working. The bare sql.ErrNoRows return was an
// inconsistency with GetSession/GetUserByID.
func (s *SQLiteStore) GetUserByEmail(ctx context.Context, email string) (*domain.User, error) {
	var user domain.User
	query := "SELECT id, email, password_hash, full_name, global_role FROM users WHERE email = ? AND is_active = 1"
	err := s.DB.QueryRowContext(ctx, query, email).Scan(
		&user.ID,
		&user.Email,
		&user.PasswordHash,
		&user.FullName,
		&user.GlobalRole,
	)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// Bouncer says no (either wrong email, or deactivated).
			return nil, errors.Join(ErrNotFound, sql.ErrNoRows)
		}
		return nil, err
	}
	return &user, nil
}
// CreateSession persists a session token for the given user with its expiry.
func (s *SQLiteStore) CreateSession(ctx context.Context, token string, userID int, expiresAt time.Time) error {
	const insert = `INSERT INTO sessions (session_token, user_id, expires_at) VALUES (?, ?, ?)`
	if _, err := s.DB.ExecContext(ctx, insert, token, userID, expiresAt); err != nil {
		return err
	}
	return nil
}
// GetSession looks up a session row by its token. It returns ErrNotFound
// when the token does not exist.
func (s *SQLiteStore) GetSession(ctx context.Context, token string) (*domain.Session, error) {
	const query = `SELECT session_token, user_id, expires_at FROM sessions WHERE session_token = ?`
	sess := &domain.Session{}
	err := s.DB.QueryRowContext(ctx, query, token).Scan(&sess.Token, &sess.UserID, &sess.ExpiresAt)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return nil, ErrNotFound
	case err != nil:
		return nil, err
	}
	return sess, nil
}
// GetUserByID fetches a user's full record, including their role.
// It returns ErrNotFound when no user has the given ID. Note that, unlike
// GetUserByEmail, this lookup does not filter on is_active.
func (s *SQLiteStore) GetUserByID(ctx context.Context, id int) (*domain.User, error) {
	const query = `SELECT id, email, full_name, password_hash, global_role FROM users WHERE id = ?`
	u := &domain.User{}
	err := s.DB.QueryRowContext(ctx, query, id).Scan(&u.ID, &u.Email, &u.FullName, &u.PasswordHash, &u.GlobalRole)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return nil, ErrNotFound
	case err != nil:
		return nil, err
	}
	return u, nil
}
// UpdateUserPassword allows an administrator to overwrite a forgotten
// password with a new (already-hashed) value.
func (s *SQLiteStore) UpdateUserPassword(ctx context.Context, id int, newPasswordHash string) error {
	_, err := s.DB.ExecContext(ctx,
		`UPDATE users SET password_hash = ? WHERE id = ?`,
		newPasswordHash, id,
	)
	return err
}
// UpdateUserRole promotes or demotes a user by updating their global_role.
func (s *SQLiteStore) UpdateUserRole(ctx context.Context, id int, newRole string) error {
	_, err := s.DB.ExecContext(ctx,
		`UPDATE users SET global_role = ? WHERE id = ?`,
		newRole, id,
	)
	return err
}
// DeactivateUserAndReassign securely offboards a user: marks the account
// inactive, removes the user's ticket assignments, and deletes any live
// sessions so they are logged out immediately.
//
// All steps — including the email lookup used to match assignments — now run
// inside a single transaction. Previously the SELECT ran outside the
// transaction, so a concurrent email change between the read and the writes
// could leave stale ticket_assignments behind.
func (s *SQLiteStore) DeactivateUserAndReassign(ctx context.Context, userID int) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// Rollback is a harmless no-op once Commit succeeds.
	defer tx.Rollback()
	// Resolve the email inside the transaction: ticket_assignments is keyed
	// by assignee email, not by user ID.
	var email string
	if err := tx.QueryRowContext(ctx, "SELECT email FROM users WHERE id = ?", userID).Scan(&email); err != nil {
		return err
	}
	if _, err := tx.ExecContext(ctx, `UPDATE users SET is_active = 0 WHERE id = ?`, userID); err != nil {
		return err
	}
	if _, err := tx.ExecContext(ctx, `DELETE FROM ticket_assignments WHERE assignee = ?`, email); err != nil {
		return err
	}
	if _, err := tx.ExecContext(ctx, `DELETE FROM sessions WHERE user_id = ?`, userID); err != nil {
		return err
	}
	return tx.Commit()
}
// GetUserCount returns the total number of registered users in the system
// (active and inactive alike).
func (s *SQLiteStore) GetUserCount(ctx context.Context) (int, error) {
	var total int
	if err := s.DB.QueryRowContext(ctx, `SELECT COUNT(*) FROM users`).Scan(&total); err != nil {
		return 0, err
	}
	return total, nil
}
// GetAllUsers returns every active user. The password hash is not selected,
// so User.PasswordHash is left at its zero value in the results.
func (s *SQLiteStore) GetAllUsers(ctx context.Context) ([]*domain.User, error) {
	rows, err := s.DB.QueryContext(ctx, "SELECT id, email, full_name, global_role FROM users WHERE is_active = 1")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var users []*domain.User
	for rows.Next() {
		var u domain.User
		// A scan failure previously dropped the row silently, which could
		// truncate the user list without any signal; surface it instead.
		if err := rows.Scan(&u.ID, &u.Email, &u.FullName, &u.GlobalRole); err != nil {
			return nil, err
		}
		users = append(users, &u)
	}
	// Report errors encountered during iteration (e.g. a lost connection).
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return users, nil
}
// DeleteSession removes the token from the database so it can never be used
// again. The sessions table's key column is session_token (see schema.sql
// and GetSession); the previous query filtered on a nonexistent "token"
// column, so logout failed with an SQL error instead of revoking the session.
func (s *SQLiteStore) DeleteSession(ctx context.Context, token string) error {
	_, err := s.DB.ExecContext(ctx, `DELETE FROM sessions WHERE session_token = ?`, token)
	return err
}

View File

@@ -0,0 +1,73 @@
package datastore
import (
"context"
"testing"
"time"
)
// TestUserAndSessionLifecycle walks a user through creation, duplicate-email
// rejection, lookup by email and ID, session issuance/retrieval, and a
// password reset.
func TestUserAndSessionLifecycle(t *testing.T) {
	store := setupTestDB(t)
	defer store.DB.Close()
	// Build the minimal schema the auth store needs. The original discarded
	// this error, so a schema failure surfaced later as a confusing
	// CreateUser failure; fail fast instead.
	_, err := store.DB.Exec(`
CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, email TEXT UNIQUE, full_name TEXT, password_hash TEXT, global_role TEXT, is_active BOOLEAN DEFAULT 1);
CREATE TABLE sessions (session_token TEXT PRIMARY KEY, user_id INTEGER, expires_at DATETIME);
`)
	if err != nil {
		t.Fatalf("Failed to create test schema: %v", err)
	}
	ctx := context.Background()
	user, err := store.CreateUser(ctx, "admin@RiskRancher.com", "doc", "fake_bcrypt_hash", "Admin")
	if err != nil {
		t.Fatalf("Failed to create user: %v", err)
	}
	if user.ID == 0 {
		t.Errorf("Expected database to return a valid auto-incremented ID, got 0")
	}
	// The UNIQUE constraint on email must reject a second registration.
	_, err = store.CreateUser(ctx, "admin@RiskRancher.com", "doc", "another_hash", "Analyst")
	if err == nil {
		t.Fatalf("Security Failure: Database allowed a duplicate email address!")
	}
	fetchedUser, err := store.GetUserByEmail(ctx, "admin@RiskRancher.com")
	if err != nil {
		t.Fatalf("Failed to fetch user by email: %v", err)
	}
	if fetchedUser.GlobalRole != "Admin" {
		t.Errorf("Expected role 'Admin', got '%s'", fetchedUser.GlobalRole)
	}
	expires := time.Now().Add(24 * time.Hour)
	err = store.CreateSession(ctx, "fake_secure_token", user.ID, expires)
	if err != nil {
		t.Fatalf("Failed to create session: %v", err)
	}
	session, err := store.GetSession(ctx, "fake_secure_token")
	if err != nil {
		t.Fatalf("Failed to retrieve session: %v", err)
	}
	if session.UserID != user.ID {
		t.Errorf("Session mapped to wrong user! Expected %d, got %d", user.ID, session.UserID)
	}
	userByID, err := store.GetUserByID(ctx, user.ID)
	if err != nil {
		t.Fatalf("Failed to fetch user by ID: %v", err)
	}
	if userByID.Email != user.Email {
		t.Errorf("GetUserByID returned wrong user. Expected %s, got %s", user.Email, userByID.Email)
	}
	newHash := "new_secure_bcrypt_hash_999"
	err = store.UpdateUserPassword(ctx, user.ID, newHash)
	if err != nil {
		t.Fatalf("Failed to update user password: %v", err)
	}
	updatedUser, _ := store.GetUserByID(ctx, user.ID)
	if updatedUser.PasswordHash != newHash {
		t.Errorf("Password hash did not update in the database")
	}
}

View File

@@ -0,0 +1,92 @@
package datastore
import (
"database/sql"
"fmt"
"path/filepath"
"sync"
"testing"
_ "github.com/mattn/go-sqlite3"
)
// runChaosEngine fires 100 concurrent workers at the provided database connection
func runChaosEngine(db *sql.DB) int {
db.Exec(`CREATE TABLE IF NOT EXISTS tickets (id INTEGER PRIMARY KEY AUTOINCREMENT, title TEXT, status TEXT)`)
db.Exec(`INSERT INTO tickets (title, status) VALUES ('Seed', 'Open')`)
var wg sync.WaitGroup
errCh := make(chan error, 1000)
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 20; i++ {
tx, _ := db.Begin()
for j := 0; j < 50; j++ {
tx.Exec(`INSERT INTO tickets (title, status) VALUES ('Vuln', 'Open')`)
}
if err := tx.Commit(); err != nil {
errCh <- err
}
}
}()
for w := 0; w < 20; w++ {
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 20; i++ {
if _, err := db.Exec(`UPDATE tickets SET status = 'Patched' WHERE id = 1`); err != nil {
errCh <- err
}
}
}()
}
for r := 0; r < 79; r++ {
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 50; i++ {
rows, err := db.Query(`SELECT COUNT(*) FROM tickets`)
if err != nil {
errCh <- err
} else {
rows.Close()
}
}
}()
}
wg.Wait()
close(errCh)
errorCount := 0
for range errCh {
errorCount++
}
return errorCount
}
// TestSQLiteConcurrency_Tuned_Succeeds proves that a WAL-journal DSN with a
// busy timeout absorbs the chaos-engine load without "database is locked"
// failures.
func TestSQLiteConcurrency_Tuned_Succeeds(t *testing.T) {
	dbPath := filepath.Join(t.TempDir(), "tuned.db")
	dsn := fmt.Sprintf("%s?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000", dbPath)
	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		t.Fatalf("Failed to open tuned DB: %v", err)
	}
	defer db.Close()
	// Match the pool sizing used by the production InitDB tuning.
	db.SetMaxOpenConns(25)
	db.SetMaxIdleConns(25)
	if errCount := runChaosEngine(db); errCount > 0 {
		t.Fatalf("FAILED! Tuned engine threw %d errors. It should have queued them perfectly.", errCount)
	}
	t.Log("SUCCESS: 100 concurrent workers survived SQLite chaos with ZERO locked errors.")
}

94
pkg/datastore/db.go Normal file
View File

@@ -0,0 +1,94 @@
package datastore
import (
"database/sql"
"embed"
_ "embed"
"encoding/json"
"log"
"time"
"epigas.gitea.cloud/RiskRancher/core/pkg/domain"
_ "github.com/mattn/go-sqlite3"
)
//go:embed schema.sql
var schemaSQL string
//go:embed defaults/*.json
var defaultAdaptersFS embed.FS
func InitDB(filepath string) *sql.DB {
dsn := "file:" + filepath + "?_journal=WAL&_timeout=5000&_sync=1&_fk=1"
db, err := sql.Open("sqlite3", dsn)
if err != nil {
log.Fatalf("Failed to open database: %v", err)
}
db.SetMaxOpenConns(25)
db.SetMaxIdleConns(25)
db.SetConnMaxLifetime(5 * time.Minute)
migrations := []string{
schemaSQL,
}
if err := RunMigrations(db, migrations); err != nil {
log.Fatalf("Database upgrade failed! Halting boot to protect data: %v", err)
}
SeedAdapters(db)
return db
}
// SeedAdapters reads the embedded JSON files and UPSERTs them into SQLite
func SeedAdapters(db *sql.DB) {
files, err := defaultAdaptersFS.ReadDir("defaults")
if err != nil {
log.Printf("No default adapters found or failed to read: %v", err)
return
}
for _, file := range files {
data, err := defaultAdaptersFS.ReadFile("defaults/" + file.Name())
if err != nil {
log.Printf("Failed to read adapter file %s: %v", file.Name(), err)
continue
}
var adapter domain.Adapter
if err := json.Unmarshal(data, &adapter); err != nil {
log.Printf("Failed to parse adapter JSON %s: %v", file.Name(), err)
continue
}
query := `
INSERT INTO data_adapters (
name, source_name, findings_path, mapping_title,
mapping_asset, mapping_severity, mapping_description, mapping_remediation
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(name) DO UPDATE SET
source_name = excluded.source_name,
findings_path = excluded.findings_path,
mapping_title = excluded.mapping_title,
mapping_asset = excluded.mapping_asset,
mapping_severity = excluded.mapping_severity,
mapping_description = excluded.mapping_description,
mapping_remediation = excluded.mapping_remediation,
updated_at = CURRENT_TIMESTAMP;
`
_, err = db.Exec(query,
adapter.Name, adapter.SourceName, adapter.FindingsPath, adapter.MappingTitle,
adapter.MappingAsset, adapter.MappingSeverity, adapter.MappingDescription, adapter.MappingRemediation,
)
if err != nil {
log.Printf("Failed to seed adapter %s to DB: %v", adapter.Name, err)
} else {
log.Printf("🔌 Successfully loaded adapter: %s", adapter.Name)
}
}
}

View File

@@ -0,0 +1,10 @@
{
"name": "Trivy Container Scan",
"source_name": "Trivy",
"findings_path": "Results.0.Vulnerabilities",
"mapping_title": "VulnerabilityID",
"mapping_asset": "PkgName",
"mapping_severity": "Severity",
"mapping_description": "Title",
"mapping_remediation": "FixedVersion"
}

View File

@@ -0,0 +1,84 @@
package datastore
import (
"context"
"database/sql"
"testing"
"epigas.gitea.cloud/RiskRancher/core/pkg/domain"
_ "github.com/mattn/go-sqlite3" // We need the SQLite driver for the test
)
// setupTestDB opens a fresh in-memory SQLite database and wraps it in a
// SQLiteStore. Callers are responsible for closing store.DB.
func setupTestDB(t *testing.T) *SQLiteStore {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		t.Fatalf("Failed to open in-memory SQLite database: %v", err)
	}
	return &SQLiteStore{DB: db}
}
// TestIngestionDiffEngine exercises the scanner-sync diff logic: tickets
// missing from an incoming batch are auto-patched, previously patched
// tickets that reappear are re-opened, and genuinely new findings are
// created in the triage queue.
func TestIngestionDiffEngine(t *testing.T) {
	store := setupTestDB(t)
	defer store.DB.Close()
	// Minimal schema covering every table ProcessIngestionBatch touches.
	_, err := store.DB.Exec(`
CREATE TABLE IF NOT EXISTS sla_policies (domain TEXT, severity TEXT, days_to_remediate INTEGER, max_extensions INTEGER, days_to_triage INTEGER);
CREATE TABLE IF NOT EXISTS routing_rules (id INTEGER, rule_type TEXT, match_value TEXT, assignee TEXT, role TEXT);
CREATE TABLE IF NOT EXISTS ticket_assignments (ticket_id INTEGER, assignee TEXT, role TEXT);
CREATE TABLE IF NOT EXISTS ticket_activity (ticket_id INTEGER, actor TEXT, activity_type TEXT, new_value TEXT);
CREATE TABLE IF NOT EXISTS tickets (
id INTEGER PRIMARY KEY AUTOINCREMENT,
source TEXT,
asset_identifier TEXT,
title TEXT,
severity TEXT,
description TEXT,
status TEXT,
dedupe_hash TEXT UNIQUE,
patched_at DATETIME,
domain TEXT,
triage_due_date DATETIME,
remediation_due_date DATETIME
)`)
	if err != nil {
		t.Fatalf("Failed to create schema: %v", err)
	}
	// Seed an open ticket. NOTE(review): this identical INSERT runs twice;
	// the second violates the UNIQUE dedupe_hash and its error is ignored,
	// so it is effectively a no-op — confirm the duplicate is intentional.
	store.DB.Exec(`INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash) VALUES
('Trivy', 'Server-A', 'Old Vuln', 'High', 'Desc', 'Waiting to be Triaged', 'hash_1_open')`)
	store.DB.Exec(`INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash) VALUES
('Trivy', 'Server-A', 'Old Vuln', 'High', 'Desc', 'Waiting to be Triaged', 'hash_1_open')`)
	// Seed a previously-patched ticket that will reappear in the new scan.
	store.DB.Exec(`INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash) VALUES
('Trivy', 'Server-A', 'Regressed Vuln', 'High', 'Desc', 'Patched', 'hash_2_patched')`)
	// Incoming scan: hash_1_open is absent, hash_2 regressed, hash_3 is new.
	incomingPayload := []domain.Ticket{
		{Source: "Trivy", AssetIdentifier: "Server-A", Title: "Regressed Vuln", DedupeHash: "hash_2_patched"},
		{Source: "Trivy", AssetIdentifier: "Server-A", Title: "Brand New Vuln", DedupeHash: "hash_3_new"},
	}
	err = store.ProcessIngestionBatch(context.Background(), "Trivy", "Server-A", incomingPayload)
	if err != nil {
		t.Fatalf("Diff Engine failed: %v", err)
	}
	// Absent from the scan → should have been auto-closed.
	var status string
	store.DB.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_1_open'`).Scan(&status)
	if status != "Patched" {
		t.Errorf("Expected hash_1_open to be Auto-Patched, got %s", status)
	}
	// Reappeared after being patched → should have been re-opened.
	store.DB.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_2_patched'`).Scan(&status)
	if status != "Waiting to be Triaged" {
		t.Errorf("Expected hash_2_patched to be Re-opened, got %s", status)
	}
	// Brand new finding → should land in the triage queue.
	store.DB.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_3_new'`).Scan(&status)
	if status != "Waiting to be Triaged" {
		t.Errorf("Expected hash_3_new to be newly created, got %s", status)
	}
}

58
pkg/datastore/migrate.go Normal file
View File

@@ -0,0 +1,58 @@
package datastore
import (
"database/sql"
"fmt"
"log"
)
// RunMigrations ensures the database schema matches the binary version
func RunMigrations(db *sql.DB, migrations []string) error {
_, err := db.Exec(`
CREATE TABLE IF NOT EXISTS schema_migrations (
version INTEGER PRIMARY KEY,
applied_at DATETIME DEFAULT CURRENT_TIMESTAMP
)
`)
if err != nil {
return fmt.Errorf("failed to create schema_migrations table: %v", err)
}
var currentVersion int
err = db.QueryRow("SELECT IFNULL(MAX(version), 0) FROM schema_migrations").Scan(&currentVersion)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("failed to read current schema version: %v", err)
}
for i, query := range migrations {
migrationVersion := i + 1
if migrationVersion > currentVersion {
log.Printf("🚀 Applying database migration v%d...", migrationVersion)
// Start a transaction so if the ALTER TABLE fails, it rolls back cleanly
tx, err := db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(query); err != nil {
tx.Rollback()
return fmt.Errorf("migration v%d failed: %v", migrationVersion, err)
}
if _, err := tx.Exec("INSERT INTO schema_migrations (version) VALUES (?)", migrationVersion); err != nil {
tx.Rollback()
return fmt.Errorf("failed to record migration v%d: %v", migrationVersion, err)
}
if err := tx.Commit(); err != nil {
return err
}
log.Printf("✅ Migration v%d applied successfully.", migrationVersion)
}
}
return nil
}

View File

@@ -0,0 +1,42 @@
package datastore
import (
"database/sql"
"testing"
_ "github.com/mattn/go-sqlite3"
)
// TestSchemaMigrations verifies that migrations apply in order, record their
// versions, are idempotent on re-run, and actually change the schema.
func TestSchemaMigrations(t *testing.T) {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		t.Fatalf("Failed to open test db: %v", err)
	}
	defer db.Close()
	scripts := []string{
		`CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT);`,
		`ALTER TABLE users ADD COLUMN email TEXT;`,
	}
	if err := RunMigrations(db, scripts); err != nil {
		t.Fatalf("Initial migration failed: %v", err)
	}
	var gotVersion int
	db.QueryRow("SELECT MAX(version) FROM schema_migrations").Scan(&gotVersion)
	if gotVersion != 2 {
		t.Errorf("Expected database to be at version 2, got %d", gotVersion)
	}
	// Running the same list again must be a no-op, not a failure.
	if err := RunMigrations(db, scripts); err != nil {
		t.Fatalf("Idempotent migration failed: %v", err)
	}
	// The second migration added the email column; an insert proves it.
	if _, err := db.Exec("INSERT INTO users (name, email) VALUES ('Tim', 'tim@ranch.com')"); err != nil {
		t.Errorf("Migration 2 did not apply correctly! Column 'email' missing: %v", err)
	}
}

147
pkg/datastore/schema.sql Normal file
View File

@@ -0,0 +1,147 @@
-- Baseline schema (applied as migration v1 via RunMigrations). Every
-- statement is idempotent (IF NOT EXISTS / INSERT OR IGNORE) so the script
-- can be replayed safely.

-- Singleton application configuration; the CHECK pins the table to one row.
CREATE TABLE IF NOT EXISTS app_config (
id INTEGER PRIMARY KEY CHECK (id = 1),
timezone TEXT DEFAULT 'America/New_York',
business_start INTEGER DEFAULT 9,
business_end INTEGER DEFAULT 17,
default_extension_days INTEGER DEFAULT 30,
backup_enabled BOOLEAN DEFAULT 1,
backup_interval_hours INTEGER DEFAULT 24,
backup_retention_days INTEGER DEFAULT 30
);
INSERT OR IGNORE INTO app_config (id) VALUES (1);
-- Reference lists for ticket domains and org departments.
CREATE TABLE IF NOT EXISTS domains (name TEXT PRIMARY KEY);
INSERT OR IGNORE INTO domains (name) VALUES ('Vulnerability'), ('Privacy'), ('Compliance'), ('Incident');
CREATE TABLE IF NOT EXISTS departments (name TEXT PRIMARY KEY);
INSERT OR IGNORE INTO departments (name) VALUES ('Security'), ('IT'), ('Privacy'), ('Legal'), ('Compliance');
-- SLA deadlines keyed by (domain, severity), seeded with defaults below.
CREATE TABLE IF NOT EXISTS sla_policies (
domain TEXT NOT NULL,
severity TEXT NOT NULL,
days_to_triage INTEGER NOT NULL DEFAULT 3,
days_to_remediate INTEGER NOT NULL,
max_extensions INTEGER NOT NULL DEFAULT 3,
PRIMARY KEY (domain, severity),
FOREIGN KEY(domain) REFERENCES domains(name) ON DELETE CASCADE
);
INSERT OR IGNORE INTO sla_policies (domain, severity, days_to_triage, days_to_remediate, max_extensions) VALUES
('Vulnerability', 'Critical', 3, 14, 1), ('Vulnerability', 'High', 3, 30, 2),
('Privacy', 'Critical', 3, 3, 0), ('Privacy', 'High', 3, 7, 1),
('Incident', 'Critical', 3, 1, 0);
-- Accounts. Deactivation (is_active = 0) rather than deletion is the
-- offboarding path — see DeactivateUserAndReassign.
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
email TEXT UNIQUE NOT NULL,
password_hash TEXT NOT NULL,
full_name TEXT NOT NULL,
global_role TEXT NOT NULL CHECK(global_role IN ('Sheriff', 'RangeHand', 'Wrangler', 'CircuitRider', 'Magistrate')),
department TEXT NOT NULL DEFAULT 'Security',
is_active BOOLEAN DEFAULT 1,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(department) REFERENCES departments(name) ON DELETE SET DEFAULT
);
-- Login sessions; cascade-deleted when the owning user row is removed.
CREATE TABLE IF NOT EXISTS sessions (
session_token TEXT PRIMARY KEY,
user_id INTEGER NOT NULL,
expires_at DATETIME NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE
);
-- Core ticket table. dedupe_hash is the ingestion diff-engine's identity key.
-- NOTE(review): analytics queries reference statuses 'Risk Accepted' and
-- 'Pending Risk Approval' and a column is_cisa_kev, none of which exist in
-- this definition — presumably added by a later migration; verify before
-- relying on this file alone.
CREATE TABLE IF NOT EXISTS tickets (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL DEFAULT 'Vulnerability',
source TEXT NOT NULL DEFAULT 'Manual',
asset_identifier TEXT NOT NULL DEFAULT 'Default',
cve_id TEXT,
audit_id TEXT UNIQUE,
compliance_tags TEXT,
title TEXT NOT NULL,
description TEXT,
recommended_remediation TEXT,
severity TEXT NOT NULL,
status TEXT DEFAULT 'Waiting to be Triaged'
CHECK(status IN (
'Waiting to be Triaged',
'Returned to Security',
'Triaged',
'Assigned Out',
'Patched',
'False Positive'
)),
dedupe_hash TEXT UNIQUE NOT NULL,
patch_evidence TEXT,
accessible_to_internet BOOLEAN DEFAULT 0,
assignee TEXT DEFAULT 'Unassigned',
latest_comment TEXT DEFAULT '',
assigned_at DATETIME,
owner_viewed_at DATETIME,
triage_due_date DATETIME,
remediation_due_date DATETIME,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
patched_at DATETIME,
FOREIGN KEY(domain) REFERENCES domains(name) ON DELETE SET DEFAULT
);
CREATE INDEX IF NOT EXISTS idx_tickets_status ON tickets(status);
CREATE INDEX IF NOT EXISTS idx_tickets_severity ON tickets(severity);
CREATE INDEX IF NOT EXISTS idx_tickets_domain ON tickets(domain);
CREATE INDEX IF NOT EXISTS idx_tickets_source_asset ON tickets(source, asset_identifier);
-- Many-to-many: who is working a ticket, keyed by assignee email.
CREATE TABLE IF NOT EXISTS ticket_assignments (
ticket_id INTEGER NOT NULL,
assignee TEXT NOT NULL,
role TEXT NOT NULL CHECK(role IN ('RangeHand', 'Wrangler', 'Magistrate')),
PRIMARY KEY (ticket_id, assignee, role),
FOREIGN KEY(ticket_id) REFERENCES tickets(id) ON DELETE CASCADE
);
-- Scanner-output mapping definitions, seeded/updated by SeedAdapters.
CREATE TABLE IF NOT EXISTS data_adapters (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
source_name TEXT NOT NULL,
findings_path TEXT NOT NULL DEFAULT '.',
mapping_title TEXT NOT NULL,
mapping_asset TEXT NOT NULL,
mapping_severity TEXT NOT NULL,
mapping_description TEXT,
mapping_remediation TEXT,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
-- Audit trail of ingestion runs.
CREATE TABLE IF NOT EXISTS sync_logs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
source TEXT NOT NULL,
status TEXT NOT NULL,
records_processed INTEGER NOT NULL,
error_message TEXT,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
-- Staging area for tickets extracted from uploaded reports, grouped by report_id.
CREATE TABLE IF NOT EXISTS draft_tickets (
id INTEGER PRIMARY KEY AUTOINCREMENT,
report_id TEXT NOT NULL,
title TEXT DEFAULT '',
description TEXT,
severity TEXT DEFAULT 'Medium',
asset_identifier TEXT DEFAULT '',
recommended_remediation TEXT DEFAULT '',
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_draft_tickets_report_id ON draft_tickets(report_id);
CREATE INDEX IF NOT EXISTS idx_assignments_assignee ON ticket_assignments(assignee);
-- Composite indexes backing the dashboard and analytics query patterns.
CREATE INDEX IF NOT EXISTS idx_tickets_status_asset ON tickets(status, asset_identifier);
CREATE INDEX IF NOT EXISTS idx_tickets_updated_at ON tickets(updated_at);
CREATE INDEX IF NOT EXISTS idx_tickets_analytics ON tickets(status, severity, source);
CREATE INDEX IF NOT EXISTS idx_tickets_due_dates ON tickets(status, remediation_due_date, triage_due_date);
CREATE INDEX IF NOT EXISTS idx_tickets_source_status ON tickets(source, status);
17
pkg/datastore/sqlite.go Normal file
View File

@@ -0,0 +1,17 @@
package datastore
import (
"database/sql"
"epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)
// SQLiteStore is the SQLite-backed implementation of the package's data
// access layer; every store method in this package hangs off it.
type SQLiteStore struct {
	DB *sql.DB
}
// Compile-time assertion that SQLiteStore implements domain.TicketStore.
var _ domain.TicketStore = (*SQLiteStore)(nil)
// NewSQLiteStore wraps an already-opened *sql.DB in a SQLiteStore.
func NewSQLiteStore(db *sql.DB) *SQLiteStore {
	return &SQLiteStore{DB: db}
}

View File

@@ -0,0 +1,173 @@
package datastore
import (
"context"
"time"
domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)
// UpdateAppConfig writes the singleton application configuration row
// (id = 1), creating it if it does not exist yet. Backup settings are
// managed separately by UpdateBackupPolicy and are untouched here.
func (s *SQLiteStore) UpdateAppConfig(ctx context.Context, config domain2.AppConfig) error {
	const upsert = `
INSERT INTO app_config (id, timezone, business_start, business_end, default_extension_days)
VALUES (1, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
timezone = excluded.timezone,
business_start = excluded.business_start,
business_end = excluded.business_end,
default_extension_days = excluded.default_extension_days
`
	_, err := s.DB.ExecContext(ctx, upsert,
		config.Timezone, config.BusinessStart, config.BusinessEnd, config.DefaultExtensionDays)
	return err
}
// GetAppConfig loads the singleton configuration row (id = 1), including
// the nested backup policy fields. The zero-value config plus the error is
// returned when the row cannot be read.
func (s *SQLiteStore) GetAppConfig(ctx context.Context) (domain2.AppConfig, error) {
	const query = `SELECT timezone, business_start, business_end, default_extension_days,
backup_enabled, backup_interval_hours, backup_retention_days
FROM app_config WHERE id = 1`
	var cfg domain2.AppConfig
	err := s.DB.QueryRowContext(ctx, query).Scan(
		&cfg.Timezone, &cfg.BusinessStart, &cfg.BusinessEnd, &cfg.DefaultExtensionDays,
		&cfg.Backup.Enabled, &cfg.Backup.IntervalHours, &cfg.Backup.RetentionDays,
	)
	return cfg, err
}
// buildSLAMap creates a fast 2D lookup table: map[Domain][Severity]Policy
func (s *SQLiteStore) buildSLAMap(ctx context.Context) (map[string]map[string]domain2.SLAPolicy, error) {
policies, err := s.GetSLAPolicies(ctx)
if err != nil {
return nil, err
}
slaMap := make(map[string]map[string]domain2.SLAPolicy)
for _, p := range policies {
if slaMap[p.Domain] == nil {
slaMap[p.Domain] = make(map[string]domain2.SLAPolicy)
}
slaMap[p.Domain][p.Severity] = p
}
return slaMap, nil
}
// ExportSystemState assembles a portable snapshot of configuration, SLA
// policies, users (with password hashes scrubbed), adapters, and a core
// subset of ticket fields.
//
// This is deliberately best-effort: each sub-section is included only when
// its read succeeds (note the inverted `err == nil` checks), and per-row
// scan failures silently skip that ticket. Only a failure of the tickets
// query itself aborts with an error — and even then the partially-built
// state is returned alongside it.
func (s *SQLiteStore) ExportSystemState(ctx context.Context) (domain2.ExportState, error) {
	var state domain2.ExportState
	state.Version = "1.1"
	state.ExportedAt = time.Now().UTC().Format(time.RFC3339)
	config, err := s.GetAppConfig(ctx)
	if err == nil {
		state.AppConfig = config
	}
	slas, err := s.GetSLAPolicies(ctx)
	if err == nil {
		state.SLAPolicies = slas
	}
	users, err := s.GetAllUsers(ctx)
	if err == nil {
		for _, u := range users {
			// Never let credential material leave the system in an export.
			u.PasswordHash = ""
			state.Users = append(state.Users, *u)
		}
	}
	adapters, err := s.GetAdapters(ctx)
	if err == nil {
		state.Adapters = adapters
	}
	// COALESCE guards against NULL descriptions breaking the string scan.
	query := `SELECT id, domain, source, asset_identifier, title, COALESCE(description, ''), severity, status, dedupe_hash, created_at FROM tickets`
	rows, err := s.DB.QueryContext(ctx, query)
	if err != nil {
		return state, err
	}
	defer rows.Close()
	for rows.Next() {
		var t domain2.Ticket
		if err := rows.Scan(&t.ID, &t.Domain, &t.Source, &t.AssetIdentifier, &t.Title, &t.Description, &t.Severity, &t.Status, &t.DedupeHash, &t.CreatedAt); err == nil {
			state.Tickets = append(state.Tickets, t)
		}
	}
	return state, nil
}
// UpdateBackupPolicy persists the backup settings onto the singleton
// app_config row (id = 1).
func (s *SQLiteStore) UpdateBackupPolicy(ctx context.Context, policy domain2.BackupPolicy) error {
	const update = `
UPDATE app_config
SET backup_enabled = ?, backup_interval_hours = ?, backup_retention_days = ?
WHERE id = 1`
	_, err := s.DB.ExecContext(ctx, update, policy.Enabled, policy.IntervalHours, policy.RetentionDays)
	return err
}
// GetSLAPolicies returns every SLA policy, ordered by domain then severity.
func (s *SQLiteStore) GetSLAPolicies(ctx context.Context) ([]domain2.SLAPolicy, error) {
	rows, err := s.DB.QueryContext(ctx, "SELECT domain, severity, days_to_remediate, max_extensions, days_to_triage FROM sla_policies ORDER BY domain, severity")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var policies []domain2.SLAPolicy
	for rows.Next() {
		var p domain2.SLAPolicy
		// Scan errors were previously ignored, which could silently append a
		// zero-value policy on a malformed row; fail loudly instead.
		if err := rows.Scan(&p.Domain, &p.Severity, &p.DaysToRemediate, &p.MaxExtensions, &p.DaysToTriage); err != nil {
			return nil, err
		}
		policies = append(policies, p)
	}
	// Report errors encountered during iteration (e.g. a lost connection).
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return policies, nil
}
// UpdateSLAPolicies applies a batch of SLA edits atomically: either every
// policy row is updated or none are. Rows are matched by (domain, severity).
func (s *SQLiteStore) UpdateSLAPolicies(ctx context.Context, slas []domain2.SLAPolicy) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// Rollback is a harmless no-op after a successful Commit.
	defer tx.Rollback()
	stmt, err := tx.PrepareContext(ctx, `
UPDATE sla_policies
SET days_to_triage = ?, days_to_remediate = ?, max_extensions = ?
WHERE domain = ? AND severity = ?`)
	if err != nil {
		return err
	}
	defer stmt.Close()
	for _, policy := range slas {
		if _, err := stmt.ExecContext(ctx, policy.DaysToTriage, policy.DaysToRemediate, policy.MaxExtensions, policy.Domain, policy.Severity); err != nil {
			return err
		}
	}
	return tx.Commit()
}
// GetWranglers returns all active users holding the 'Wrangler' role,
// ordered by email.
func (s *SQLiteStore) GetWranglers(ctx context.Context) ([]domain2.User, error) {
	query := `
SELECT id, email, full_name, global_role, is_active, created_at
FROM users
WHERE global_role = 'Wrangler' AND is_active = 1
ORDER BY email ASC
`
	rows, err := s.DB.QueryContext(ctx, query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var wranglers []domain2.User
	for rows.Next() {
		var w domain2.User
		if err := rows.Scan(&w.ID, &w.Email, &w.FullName, &w.GlobalRole, &w.IsActive, &w.CreatedAt); err != nil {
			return nil, err
		}
		wranglers = append(wranglers, w)
	}
	// rows.Err() was previously unchecked, so a mid-iteration failure could
	// silently return a truncated list.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return wranglers, nil
}

View File

@@ -0,0 +1,357 @@
package datastore
import (
"context"
"fmt"
"time"
domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)
// GetSheriffAnalytics aggregates the executive dashboard metrics: open KEV
// and critical counts, overdue totals, global MTTR, per-source health with
// a heuristic "strategic note", severity and resolution breakdowns, and the
// five assets with the most open tickets.
//
// This is best-effort throughout: query and scan errors are deliberately
// ignored and leave the affected metric at its zero value; the function
// always returns a nil error.
//
// NOTE(review): several queries filter on is_cisa_kev and on the statuses
// 'Risk Accepted' / 'Pending Risk Approval', which schema.sql in this commit
// does not define — presumably added by a later migration; verify.
func (s *SQLiteStore) GetSheriffAnalytics(ctx context.Context) (domain2.SheriffAnalytics, error) {
	var metrics domain2.SheriffAnalytics
	// Headline counters (best effort; a failed scan leaves zero).
	s.DB.QueryRowContext(ctx, "SELECT COUNT(*) FROM tickets WHERE is_cisa_kev = 1 AND status NOT IN ('Patched', 'Risk Accepted', 'False Positive')").Scan(&metrics.ActiveKEVs)
	s.DB.QueryRowContext(ctx, "SELECT COUNT(*) FROM tickets WHERE severity = 'Critical' AND status NOT IN ('Patched', 'Risk Accepted', 'False Positive')").Scan(&metrics.OpenCriticals)
	s.DB.QueryRowContext(ctx, "SELECT COUNT(*) FROM tickets WHERE remediation_due_date < CURRENT_TIMESTAMP AND status NOT IN ('Patched', 'Risk Accepted', 'False Positive')").Scan(&metrics.TotalOverdue)
	// Mean time to remediate, in days (julianday difference), truncated to int.
	mttrQuery := `
SELECT COALESCE(AVG(julianday(t.patched_at) - julianday(t.created_at)), 0)
FROM tickets t
WHERE t.status = 'Patched'
`
	var mttrFloat float64
	s.DB.QueryRowContext(ctx, mttrQuery).Scan(&mttrFloat)
	metrics.GlobalMTTRDays = int(mttrFloat)
	// Per-source rollup: open/critical/KEV/untriaged/overdue/closed buckets.
	sourceQuery := `
SELECT
t.source,
SUM(CASE WHEN t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as total_open,
SUM(CASE WHEN t.severity = 'Critical' AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as criticals,
SUM(CASE WHEN t.is_cisa_kev = 1 AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as cisa_kevs,
SUM(CASE WHEN t.status = 'Waiting to be Triaged' THEN 1 ELSE 0 END) as untriaged,
SUM(CASE WHEN t.remediation_due_date < CURRENT_TIMESTAMP AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as patch_overdue,
SUM(CASE WHEN t.status = 'Pending Risk Approval' THEN 1 ELSE 0 END) as pending_risk,
SUM(CASE WHEN t.status IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as total_closed,
SUM(CASE WHEN t.status = 'Patched' THEN 1 ELSE 0 END) as patched,
SUM(CASE WHEN t.status = 'Risk Accepted' THEN 1 ELSE 0 END) as risk_accepted,
SUM(CASE WHEN t.status = 'False Positive' THEN 1 ELSE 0 END) as false_positive
FROM tickets t
GROUP BY t.source
ORDER BY criticals DESC, patch_overdue DESC
`
	rows, err := s.DB.QueryContext(ctx, sourceQuery)
	if err == nil {
		defer rows.Close()
		for rows.Next() {
			var sm domain2.SourceMetrics
			rows.Scan(&sm.Source, &sm.TotalOpen, &sm.Criticals, &sm.CisaKEVs, &sm.Untriaged, &sm.PatchOverdue, &sm.PendingRisk, &sm.TotalClosed, &sm.Patched, &sm.RiskAccepted, &sm.FalsePositive)
			// NOTE(review): this per-source lookup runs one extra query per
			// row (an N+1 pattern) — fine at small source counts, but worth
			// folding into the main query if sources grow.
			topAssigneeQ := `
SELECT COALESCE(ta.assignee, 'Unassigned'), COUNT(t.id) as c
FROM tickets t LEFT JOIN ticket_assignments ta ON t.id = ta.ticket_id
WHERE t.source = ? AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')
GROUP BY ta.assignee ORDER BY c DESC LIMIT 1`
			var assignee string
			var count int
			s.DB.QueryRowContext(ctx, topAssigneeQ, sm.Source).Scan(&assignee, &count)
			if count > 0 {
				sm.TopAssignee = fmt.Sprintf("%s (%d)", assignee, count)
			} else {
				sm.TopAssignee = "N/A"
			}
			// Heuristic triage note: the first matching condition wins, in
			// descending order of urgency.
			if sm.PatchOverdue > 0 {
				sm.StrategicNote = "🚨 SLA Breach (Escalate to IT Managers)"
			} else if sm.Untriaged > 0 {
				sm.StrategicNote = "⚠️ Triage Bottleneck (Check Analysts)"
			} else if sm.PendingRisk > 0 {
				sm.StrategicNote = "⚖️ Blocked by Exec Adjudication"
			} else if sm.Criticals > 0 {
				sm.StrategicNote = "🔥 High Risk (Monitor closely)"
			} else if sm.RiskAccepted > sm.Patched && sm.TotalClosed > 0 {
				sm.StrategicNote = "👀 High Risk Acceptance Rate (Audit Required)"
			} else if sm.FalsePositive > sm.Patched && sm.TotalClosed > 0 {
				sm.StrategicNote = "🔧 Noisy Source (Scanner needs tuning)"
			} else if sm.TotalClosed > 0 {
				sm.StrategicNote = "✅ Healthy Resolution Velocity"
			} else {
				sm.StrategicNote = "✅ Routine Processing"
			}
			metrics.SourceHealth = append(metrics.SourceHealth, sm)
		}
	}
	// Open-ticket severity distribution plus integer percentages.
	sevQuery := `SELECT severity, COUNT(id) FROM tickets WHERE status NOT IN ('Patched', 'Risk Accepted', 'False Positive') GROUP BY severity`
	rowsSev, err := s.DB.QueryContext(ctx, sevQuery)
	if err == nil {
		defer rowsSev.Close()
		for rowsSev.Next() {
			var sev string
			var count int
			rowsSev.Scan(&sev, &count)
			metrics.Severity.Total += count
			switch sev {
			case "Critical":
				metrics.Severity.Critical = count
			case "High":
				metrics.Severity.High = count
			case "Medium":
				metrics.Severity.Medium = count
			case "Low":
				metrics.Severity.Low = count
			case "Info":
				metrics.Severity.Info = count
			}
		}
		if metrics.Severity.Total > 0 {
			metrics.Severity.CritPct = int((float64(metrics.Severity.Critical) / float64(metrics.Severity.Total)) * 100)
			metrics.Severity.HighPct = int((float64(metrics.Severity.High) / float64(metrics.Severity.Total)) * 100)
			metrics.Severity.MedPct = int((float64(metrics.Severity.Medium) / float64(metrics.Severity.Total)) * 100)
			metrics.Severity.LowPct = int((float64(metrics.Severity.Low) / float64(metrics.Severity.Total)) * 100)
			metrics.Severity.InfoPct = int((float64(metrics.Severity.Info) / float64(metrics.Severity.Total)) * 100)
		}
	}
	// How closed tickets were resolved, with integer percentages.
	resQuery := `SELECT status, COUNT(id) FROM tickets WHERE status IN ('Patched', 'Risk Accepted', 'False Positive') GROUP BY status`
	rowsRes, err := s.DB.QueryContext(ctx, resQuery)
	if err == nil {
		defer rowsRes.Close()
		for rowsRes.Next() {
			var status string
			var count int
			rowsRes.Scan(&status, &count)
			metrics.Resolution.Total += count
			switch status {
			case "Patched":
				metrics.Resolution.Patched = count
			case "Risk Accepted":
				metrics.Resolution.RiskAccepted = count
			case "False Positive":
				metrics.Resolution.FalsePositive = count
			}
		}
		if metrics.Resolution.Total > 0 {
			metrics.Resolution.PatchedPct = int((float64(metrics.Resolution.Patched) / float64(metrics.Resolution.Total)) * 100)
			metrics.Resolution.RiskAccPct = int((float64(metrics.Resolution.RiskAccepted) / float64(metrics.Resolution.Total)) * 100)
			metrics.Resolution.FalsePosPct = int((float64(metrics.Resolution.FalsePositive) / float64(metrics.Resolution.Total)) * 100)
		}
	}
	// Top five noisiest assets; Percentage is relative to the worst asset
	// (the first row), not to the total, so bars render proportionally.
	assetQuery := `SELECT asset_identifier, COUNT(id) as c FROM tickets WHERE status NOT IN ('Patched', 'Risk Accepted', 'False Positive') GROUP BY asset_identifier ORDER BY c DESC LIMIT 5`
	rowsAsset, err := s.DB.QueryContext(ctx, assetQuery)
	if err == nil {
		defer rowsAsset.Close()
		var maxAssetCount int
		for rowsAsset.Next() {
			var am domain2.AssetMetric
			rowsAsset.Scan(&am.Asset, &am.Count)
			if maxAssetCount == 0 {
				maxAssetCount = am.Count
			}
			if maxAssetCount > 0 {
				am.Percentage = int((float64(am.Count) / float64(maxAssetCount)) * 100)
			}
			metrics.TopAssets = append(metrics.TopAssets, am)
		}
	}
	return metrics, nil
}
// GetDashboardTickets returns one page of tickets for the dashboard, plus the
// total matching row count and a map of headline counters
// (critical/overdue/mine/verification/returned).
//
// Role scoping: Wranglers only see tickets assigned to them (LOWER-cased
// email match); Sheriffs skip the counter queries entirely and get all zeros.
//
// NOTE(review): the counter Scans and the COUNT(*) Scan discard errors, so a
// failing query silently leaves that counter/total at zero — presumably
// deliberate best-effort behavior; confirm.
func (s *SQLiteStore) GetDashboardTickets(ctx context.Context, tabStatus, filter, assetFilter, userEmail, userRole string, limit, offset int) ([]domain2.Ticket, int, map[string]int, error) {
	metrics := map[string]int{
		"critical":     0,
		"overdue":      0,
		"mine":         0,
		"verification": 0,
		"returned":     0,
	}
	// Wranglers are restricted to their own assignments everywhere below.
	scope := ""
	var scopeArgs []any
	if userRole == "Wrangler" {
		scope = ` AND LOWER(t.assignee) = LOWER(?)`
		scopeArgs = append(scopeArgs, userEmail)
	}
	if userRole != "Sheriff" {
		// Headline counters; each is an independent best-effort query.
		var critCount, overCount, mineCount, verifyCount, returnedCount int
		critQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.severity = 'Critical' AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')" + scope
		s.DB.QueryRowContext(ctx, critQ, scopeArgs...).Scan(&critCount)
		metrics["critical"] = critCount
		overQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.remediation_due_date < CURRENT_TIMESTAMP AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')" + scope
		s.DB.QueryRowContext(ctx, overQ, scopeArgs...).Scan(&overCount)
		metrics["overdue"] = overCount
		// "Mine" is always keyed on the caller's email, regardless of role scope.
		mineQ := "SELECT COUNT(t.id) FROM tickets t WHERE LOWER(t.assignee) = LOWER(?) AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')"
		s.DB.QueryRowContext(ctx, mineQ, userEmail).Scan(&mineCount)
		metrics["mine"] = mineCount
		verifyQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.status = 'Pending Verification'" + scope
		s.DB.QueryRowContext(ctx, verifyQ, scopeArgs...).Scan(&verifyCount)
		metrics["verification"] = verifyCount
		retQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.status = 'Returned to Security'" + scope
		s.DB.QueryRowContext(ctx, retQ, scopeArgs...).Scan(&returnedCount)
		metrics["returned"] = returnedCount
	}
	// Build the shared WHERE clause once; it is reused by the COUNT and the
	// paginated SELECT. args must grow in the same order placeholders appear
	// in baseQ.
	baseQ := "FROM tickets t WHERE 1=1" + scope
	var args []any
	args = append(args, scopeArgs...)
	if assetFilter != "" {
		baseQ += " AND t.asset_identifier = ?"
		args = append(args, assetFilter)
	}
	// Tab selection: holding pen, open "exceptions", closed archives, or an
	// explicit single status.
	if tabStatus == "Waiting to be Triaged" || tabStatus == "holding_pen" {
		baseQ += " AND t.status IN ('Waiting to be Triaged', 'Returned to Security', 'Triaged')"
	} else if tabStatus == "Exceptions" {
		baseQ += " AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')"
	} else if tabStatus == "archives" {
		baseQ += " AND t.status IN ('Patched', 'Risk Accepted', 'False Positive')"
	} else if tabStatus != "" {
		baseQ += " AND t.status = ?"
		args = append(args, tabStatus)
	}
	// Secondary filter within the tab (severity / SLA / ownership), or a
	// specific closed status when browsing the archives.
	if filter == "critical" {
		baseQ += " AND t.severity = 'Critical'"
	} else if filter == "overdue" {
		baseQ += " AND t.remediation_due_date < CURRENT_TIMESTAMP"
	} else if filter == "mine" {
		baseQ += " AND LOWER(t.assignee) = LOWER(?)"
		args = append(args, userEmail)
	} else if tabStatus == "archives" && filter != "" && filter != "all" {
		baseQ += " AND t.status = ?"
		args = append(args, filter)
	}
	var total int
	s.DB.QueryRowContext(ctx, "SELECT COUNT(t.id) "+baseQ, args...).Scan(&total)
	// Tickets returned to security float to the top; newest first after that.
	orderClause := "ORDER BY (CASE WHEN t.status = 'Returned to Security' THEN 0 ELSE 1 END) ASC, t.id DESC"
	// Paginate by id first (cheap CTE), then join back for the full payload.
	query := `
	WITH PaginatedIDs AS (
		SELECT t.id ` + baseQ + ` ` + orderClause + ` LIMIT ? OFFSET ?
	)
	SELECT
		t.id, t.source, t.asset_identifier, t.title, COALESCE(t.description, ''), COALESCE(t.recommended_remediation, ''), t.severity, t.status,
		t.triage_due_date, t.remediation_due_date, COALESCE(t.patch_evidence, ''),
		t.assignee as current_assignee,
		t.owner_viewed_at,
		t.updated_at,
		CAST(julianday(COALESCE(t.patched_at, t.updated_at)) - julianday(t.created_at) AS INTEGER) as days_to_resolve,
		COALESCE(t.latest_comment, '') as latest_comment
	FROM PaginatedIDs p
	JOIN tickets t ON t.id = p.id
	` + orderClause
	args = append(args, limit, offset)
	rows, err := s.DB.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, 0, metrics, err
	}
	defer rows.Close()
	var tickets []domain2.Ticket
	for rows.Next() {
		var t domain2.Ticket
		var assignee string
		err := rows.Scan(
			&t.ID, &t.Source, &t.AssetIdentifier, &t.Title, &t.Description,
			&t.RecommendedRemediation, &t.Severity, &t.Status,
			&t.TriageDueDate, &t.RemediationDueDate, &t.PatchEvidence,
			&assignee,
			&t.OwnerViewedAt,
			&t.UpdatedAt,
			&t.DaysToResolve,
			&t.LatestComment,
		)
		// Rows that fail to scan are skipped rather than failing the page.
		if err == nil {
			t.Assignee = assignee
			// A ticket is overdue only while still open for remediation.
			t.IsOverdue = !t.RemediationDueDate.IsZero() && t.RemediationDueDate.Before(time.Now()) && t.Status != "Patched" && t.Status != "Risk Accepted"
			if tabStatus == "archives" {
				// Closed tickets show time-to-resolution instead of a due date.
				if t.DaysToResolve != nil {
					t.SLAString = fmt.Sprintf("%d days", *t.DaysToResolve)
				} else {
					t.SLAString = "Unknown"
				}
			} else {
				t.SLAString = t.RemediationDueDate.Format("Jan 02, 2006")
			}
			tickets = append(tickets, t)
		}
	}
	return tickets, total, metrics, nil
}
// GetGlobalActivityFeed returns a single placeholder entry in the open-core
// build; detailed immutable audit logging is reserved for the Pro edition.
func (s *SQLiteStore) GetGlobalActivityFeed(ctx context.Context, limit int) ([]domain2.FeedItem, error) {
	teaser := domain2.FeedItem{
		Actor:        "System",
		ActivityType: "Info",
		NewValue:     "Detailed Immutable Audit Logging is a RiskRancher Pro feature. Upgrade to track all ticket lifecycle events.",
		TimeAgo:      "Just now",
	}
	return []domain2.FeedItem{teaser}, nil
}
// GetAnalyticsSummary returns flat open-ticket counts keyed by "Total_Open",
// "Source_<source>_Open" and "Severity_<sev>_Open".
//
// Fix: "open" now excludes 'False Positive' in addition to 'Patched' and
// 'Risk Accepted', matching every other open-ticket query in this store.
// Previously false positives were counted as open here only.
func (s *SQLiteStore) GetAnalyticsSummary(ctx context.Context) (map[string]int, error) {
	const openFilter = `status NOT IN ('Patched', 'Risk Accepted', 'False Positive')`
	summary := make(map[string]int)
	var total int
	err := s.DB.QueryRowContext(ctx, `SELECT COUNT(*) FROM tickets WHERE `+openFilter).Scan(&total)
	if err != nil {
		return nil, err
	}
	summary["Total_Open"] = total
	// The per-source and per-severity breakdowns are best-effort: a failing
	// query or row simply leaves its keys absent from the summary.
	sourceRows, err := s.DB.QueryContext(ctx, `SELECT source, COUNT(*) FROM tickets WHERE `+openFilter+` GROUP BY source`)
	if err == nil {
		defer sourceRows.Close()
		for sourceRows.Next() {
			var source string
			var count int
			if err := sourceRows.Scan(&source, &count); err == nil {
				summary["Source_"+source+"_Open"] = count
			}
		}
	}
	sevRows, err := s.DB.QueryContext(ctx, `SELECT severity, COUNT(*) FROM tickets WHERE `+openFilter+` GROUP BY severity`)
	if err == nil {
		defer sevRows.Close()
		for sevRows.Next() {
			var sev string
			var count int
			if err := sevRows.Scan(&sev, &count); err == nil {
				summary["Severity_"+sev+"_Open"] = count
			}
		}
	}
	return summary, nil
}
// GetPaginatedActivityFeed is a stub in the open-core build: it always
// reports an empty, zero-count feed regardless of filter or paging.
func (s *SQLiteStore) GetPaginatedActivityFeed(ctx context.Context, filter string, limit, offset int) ([]domain2.FeedItem, int, error) {
	empty := make([]domain2.FeedItem, 0)
	return empty, 0, nil
}

View File

@@ -0,0 +1,109 @@
package datastore
import (
"context"
"fmt"
domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)
// SaveDraft inserts a single draft ticket row tied to its parent report.
func (s *SQLiteStore) SaveDraft(ctx context.Context, d domain2.DraftTicket) error {
	const insertSQL = `
	INSERT INTO draft_tickets (report_id, title, description, severity, asset_identifier, recommended_remediation)
	VALUES (?, ?, ?, ?, ?, ?)`
	_, err := s.DB.ExecContext(
		ctx,
		insertSQL,
		d.ReportID,
		d.Title,
		d.Description,
		d.Severity,
		d.AssetIdentifier,
		d.RecommendedRemediation,
	)
	return err
}
// GetDraftsByReport loads every draft ticket attached to the given report.
// It always returns a non-nil slice so JSON encoding yields [] rather than
// null when no drafts exist.
//
// Fix: scan errors were previously swallowed (bad rows silently dropped) and
// rows.Err() was never checked; both now fail the call.
func (s *SQLiteStore) GetDraftsByReport(ctx context.Context, reportID string) ([]domain2.DraftTicket, error) {
	query := `SELECT id, report_id, COALESCE(title, ''), COALESCE(description, ''), COALESCE(severity, 'Medium'), COALESCE(asset_identifier, ''), COALESCE(recommended_remediation, '')
	FROM draft_tickets WHERE report_id = ?`
	rows, err := s.DB.QueryContext(ctx, query, reportID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	drafts := []domain2.DraftTicket{}
	for rows.Next() {
		var d domain2.DraftTicket
		if err := rows.Scan(&d.ID, &d.ReportID, &d.Title, &d.Description, &d.Severity, &d.AssetIdentifier, &d.RecommendedRemediation); err != nil {
			return nil, err
		}
		drafts = append(drafts, d)
	}
	// Surface iteration errors (e.g. a connection dropped mid-read).
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return drafts, nil
}
// DeleteDraft removes one draft ticket by its primary key.
func (s *SQLiteStore) DeleteDraft(ctx context.Context, draftID string) error {
	_, err := s.DB.ExecContext(ctx, `DELETE FROM draft_tickets WHERE id = ?`, draftID)
	return err
}
// UpdateDraft overwrites the editable fields of a draft ticket from an
// inline-edit payload.
func (s *SQLiteStore) UpdateDraft(ctx context.Context, draftID int, payload domain2.Ticket) error {
	const updateSQL = `UPDATE draft_tickets SET title = ?, severity = ?, asset_identifier = ?, description = ?, recommended_remediation = ? WHERE id = ?`
	_, err := s.DB.ExecContext(ctx, updateSQL,
		payload.Title, payload.Severity, payload.AssetIdentifier,
		payload.Description, payload.RecommendedRemediation, draftID)
	return err
}
// PromotePentestDrafts converts every draft ticket of a pentest report into
// real tickets, records the analyst as each ticket's 'RangeHand', and deletes
// the report's drafts — all in one transaction so a partial promotion can
// never persist.
//
// Parameters:
//   reportID     - the report whose drafts are promoted (and then purged)
//   analystEmail - written into ticket_assignments for each new ticket
//   tickets      - the draft payloads to insert
func (s *SQLiteStore) PromotePentestDrafts(ctx context.Context, reportID string, analystEmail string, tickets []domain2.Ticket) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// Rollback becomes a no-op once Commit succeeds.
	defer tx.Rollback()
	for _, t := range tickets {
		// Deterministic dedupe key so re-promoting the same finding collides
		// on tickets.dedupe_hash instead of duplicating.
		hash := fmt.Sprintf("manual-pentest-%s-%s", t.AssetIdentifier, t.Title)
		res, err := tx.ExecContext(ctx, `
		INSERT INTO tickets (
			source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash,
			triage_due_date, remediation_due_date, created_at, updated_at
		)
		VALUES (?, ?, ?, ?, ?, ?, 'Waiting to be Triaged', ?, DATETIME('now', '+3 days'), DATETIME('now', '+14 days'), CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
		`, "Manual Pentest", t.AssetIdentifier, t.Title, t.Description, t.RecommendedRemediation, t.Severity, hash)
		if err != nil {
			return err
		}
		ticketID, err := res.LastInsertId()
		if err != nil {
			return err
		}
		// Attach the promoting analyst to the newly created ticket.
		_, err = tx.ExecContext(ctx, `
		INSERT INTO ticket_assignments (ticket_id, assignee, role)
		VALUES (?, ?, 'RangeHand')
		`, ticketID, analystEmail)
		if err != nil {
			return err
		}
	}
	// Drafts are consumed by promotion.
	_, err = tx.ExecContext(ctx, "DELETE FROM draft_tickets WHERE report_id = ?", reportID)
	if err != nil {
		return err
	}
	return tx.Commit()
}

View File

@@ -0,0 +1,284 @@
package datastore
import (
"context"
"database/sql"
"time"
domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)
// IngestTickets bulk-upserts tickets via a connection-local TEMP staging
// table: rows are batch-inserted into staging, then merged into tickets with
// ON CONFLICT(dedupe_hash) refreshing description/updated_at on duplicates.
// The whole operation runs inside one transaction.
//
// NOTE(review): the staging-table DELETE and final DROP discard their errors
// — best-effort cleanup of a TEMP table; confirm this is intentional.
func (s *SQLiteStore) IngestTickets(ctx context.Context, tickets []domain2.Ticket) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// No-op once Commit succeeds.
	defer tx.Rollback()
	_, err = tx.ExecContext(ctx, `
	CREATE TEMP TABLE IF NOT EXISTS staging_tickets (
		domain TEXT, source TEXT, asset_identifier TEXT, title TEXT,
		description TEXT, recommended_remediation TEXT, severity TEXT,
		status TEXT, dedupe_hash TEXT
	)
	`)
	if err != nil {
		return err
	}
	// Clear any residue left by a prior batch on this connection.
	tx.ExecContext(ctx, `DELETE FROM staging_tickets`)
	stmt, err := tx.PrepareContext(ctx, `
	INSERT INTO staging_tickets (domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
	`)
	if err != nil {
		return err
	}
	for _, t := range tickets {
		// Default sparse fields before staging.
		status := t.Status
		if status == "" {
			status = "Waiting to be Triaged"
		}
		domain := t.Domain
		if domain == "" {
			domain = "Vulnerability"
		}
		source := t.Source
		if source == "" {
			source = "Manual"
		}
		_, err = stmt.ExecContext(ctx, domain, source, t.AssetIdentifier, t.Title, t.Description, t.RecommendedRemediation, t.Severity, status, t.DedupeHash)
		if err != nil {
			stmt.Close()
			return err
		}
	}
	stmt.Close()
	// Merge staging into tickets; duplicates refresh description/updated_at.
	_, err = tx.ExecContext(ctx, `
	INSERT INTO tickets (domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash)
	SELECT domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash
	FROM staging_tickets
	WHERE true -- Prevents SQLite from mistaking 'ON CONFLICT' for a JOIN condition
	ON CONFLICT(dedupe_hash) DO UPDATE SET
		description = excluded.description,
		updated_at = CURRENT_TIMESTAMP
	`)
	if err != nil {
		return err
	}
	tx.ExecContext(ctx, `DROP TABLE staging_tickets`)
	return tx.Commit()
}
// GetAdapters returns every configured data adapter together with its
// field-mapping configuration.
//
// Fix: Scan errors were previously ignored, which appended partially-filled
// adapters to the result; rows.Err() was never checked. Both now fail the
// call.
func (s *SQLiteStore) GetAdapters(ctx context.Context) ([]domain2.Adapter, error) {
	rows, err := s.DB.QueryContext(ctx, "SELECT id, name, source_name, findings_path, mapping_title, mapping_asset, mapping_severity, mapping_description, mapping_remediation FROM data_adapters")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var adapters []domain2.Adapter
	for rows.Next() {
		var a domain2.Adapter
		if err := rows.Scan(&a.ID, &a.Name, &a.SourceName, &a.FindingsPath, &a.MappingTitle, &a.MappingAsset, &a.MappingSeverity, &a.MappingDescription, &a.MappingRemediation); err != nil {
			return nil, err
		}
		adapters = append(adapters, a)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return adapters, nil
}
// SaveAdapter persists a new data adapter and its field mappings.
func (s *SQLiteStore) SaveAdapter(ctx context.Context, a domain2.Adapter) error {
	const insertSQL = `
	INSERT INTO data_adapters (name, source_name, findings_path, mapping_title, mapping_asset, mapping_severity, mapping_description, mapping_remediation)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?)`
	_, err := s.DB.ExecContext(
		ctx, insertSQL,
		a.Name, a.SourceName, a.FindingsPath,
		a.MappingTitle, a.MappingAsset, a.MappingSeverity,
		a.MappingDescription, a.MappingRemediation,
	)
	return err
}
// GetAdapterByID fetches one data adapter, including its mapping
// configuration and timestamps, by primary key.
func (s *SQLiteStore) GetAdapterByID(ctx context.Context, id int) (domain2.Adapter, error) {
	const q = `
	SELECT
		id, name, source_name, findings_path,
		mapping_title, mapping_asset, mapping_severity,
		IFNULL(mapping_description, ''), IFNULL(mapping_remediation, ''),
		created_at, updated_at
	FROM data_adapters
	WHERE id = ?`
	var adapter domain2.Adapter
	row := s.DB.QueryRowContext(ctx, q, id)
	err := row.Scan(
		&adapter.ID, &adapter.Name, &adapter.SourceName, &adapter.FindingsPath,
		&adapter.MappingTitle, &adapter.MappingAsset, &adapter.MappingSeverity,
		&adapter.MappingDescription, &adapter.MappingRemediation,
		&adapter.CreatedAt, &adapter.UpdatedAt,
	)
	return adapter, err
}
// DeleteAdapter removes the adapter with the given id.
func (s *SQLiteStore) DeleteAdapter(ctx context.Context, id int) error {
	_, err := s.DB.ExecContext(ctx, `DELETE FROM data_adapters WHERE id = ?`, id)
	return err
}
// GetAdapterByName fetches one data adapter (without timestamps) by its
// unique name.
func (s *SQLiteStore) GetAdapterByName(ctx context.Context, name string) (domain2.Adapter, error) {
	const q = `
	SELECT
		id, name, source_name, findings_path,
		mapping_title, mapping_asset, mapping_severity,
		IFNULL(mapping_description, ''), IFNULL(mapping_remediation, '')
	FROM data_adapters
	WHERE name = ?`
	var adapter domain2.Adapter
	err := s.DB.QueryRowContext(ctx, q, name).Scan(
		&adapter.ID, &adapter.Name, &adapter.SourceName, &adapter.FindingsPath,
		&adapter.MappingTitle, &adapter.MappingAsset, &adapter.MappingSeverity,
		&adapter.MappingDescription, &adapter.MappingRemediation,
	)
	return adapter, err
}
// ProcessIngestionBatch reconciles one scanner run for a source/asset pair
// against existing tickets: new findings are inserted, resurfaced ones
// reopened, changed descriptions refreshed, and vanished findings
// auto-closed — all within a single transaction (see calculateDiffState and
// executeBatchMutations).
//
// NOTE(review): the buildSLAMap error is discarded; on failure the batch
// proceeds with a nil SLA map and the downstream 3/30-day defaults apply —
// presumably deliberate best-effort, confirm.
func (s *SQLiteStore) ProcessIngestionBatch(ctx context.Context, source, asset string, incoming []domain2.Ticket) error {
	slaMap, _ := s.buildSLAMap(ctx)
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// No-op once Commit succeeds.
	defer tx.Rollback()
	// Normalize sparse incoming records before diffing.
	for i := range incoming {
		if incoming[i].Domain == "" {
			incoming[i].Domain = "Vulnerability"
		}
		if incoming[i].Status == "" {
			incoming[i].Status = "Waiting to be Triaged"
		}
	}
	inserts, reopens, updates, closes, err := s.calculateDiffState(ctx, tx, source, asset, incoming)
	if err != nil {
		return err
	}
	if err := s.executeBatchMutations(ctx, tx, source, asset, slaMap, inserts, reopens, updates, closes); err != nil {
		return err
	}
	return tx.Commit()
}
// calculateDiffState compares incoming findings against existing tickets for
// the same source/asset and buckets them:
//
//	inserts     - dedupe hashes never seen before
//	reopens     - previously 'Patched' findings that resurfaced
//	descUpdates - still-open findings whose description text changed
//	autocloses  - open ticket hashes the scanner no longer reports
//
// Terminal states ('Patched', 'Risk Accepted', 'False Positive') are never
// auto-closed or description-refreshed.
//
// Fix: Scan errors were previously skipped silently, which could make an
// existing ticket look brand new and get re-inserted; rows.Err() was never
// checked. Both now abort the diff.
func (s *SQLiteStore) calculateDiffState(ctx context.Context, tx *sql.Tx, source, asset string, incoming []domain2.Ticket) (inserts, reopens, descUpdates []domain2.Ticket, autocloses []string, err error) {
	rows, err := tx.QueryContext(ctx, `SELECT dedupe_hash, status, COALESCE(description, '') FROM tickets WHERE source = ? AND asset_identifier = ?`, source, asset)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	defer rows.Close()
	type existingRecord struct{ status, description string }
	existingMap := make(map[string]existingRecord)
	for rows.Next() {
		var hash, status, desc string
		if err := rows.Scan(&hash, &status, &desc); err != nil {
			return nil, nil, nil, nil, err
		}
		existingMap[hash] = existingRecord{status: status, description: desc}
	}
	if err := rows.Err(); err != nil {
		return nil, nil, nil, nil, err
	}
	incomingMap := make(map[string]bool)
	for _, ticket := range incoming {
		incomingMap[ticket.DedupeHash] = true
		existing, exists := existingMap[ticket.DedupeHash]
		if !exists {
			inserts = append(inserts, ticket)
			continue
		}
		if existing.status == "Patched" {
			reopens = append(reopens, ticket)
		}
		if ticket.Description != "" && ticket.Description != existing.description && existing.status != "Patched" && existing.status != "Risk Accepted" && existing.status != "False Positive" {
			descUpdates = append(descUpdates, ticket)
		}
	}
	// Anything still open that the scanner stopped reporting is auto-closed.
	for hash, record := range existingMap {
		if !incomingMap[hash] && record.status != "Patched" && record.status != "Risk Accepted" && record.status != "False Positive" {
			autocloses = append(autocloses, hash)
		}
	}
	return inserts, reopens, descUpdates, autocloses, nil
}
// executeBatchMutations applies the four diff outcomes inside the caller's
// transaction: inserts of new findings (with SLA-derived due dates), reopens
// of previously patched findings, description refreshes, and auto-closes for
// findings the scanner no longer reports.
//
// Fix: statement preparation and execution errors in the reopen / update /
// close phases were previously discarded — a failed PrepareContext left a
// nil *sql.Stmt whose ExecContext would panic. All errors now abort the
// batch so the caller's transaction rolls back.
func (s *SQLiteStore) executeBatchMutations(ctx context.Context, tx *sql.Tx, source, asset string, slaMap map[string]map[string]domain2.SLAPolicy, inserts, reopens, descUpdates []domain2.Ticket, autocloses []string) error {
	now := time.Now()
	// A. Inserts: new findings get SLA due dates from the policy map,
	// falling back to 3 days triage / 30 days remediation.
	if len(inserts) > 0 {
		insertStmt, err := tx.PrepareContext(ctx, `INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash, domain, triage_due_date, remediation_due_date) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`)
		if err != nil {
			return err
		}
		defer insertStmt.Close()
		for _, t := range inserts {
			daysToTriage, daysToRemediate := 3, 30
			if dMap, ok := slaMap[t.Domain]; ok {
				if policy, ok := dMap[t.Severity]; ok {
					daysToTriage, daysToRemediate = policy.DaysToTriage, policy.DaysToRemediate
				}
			}
			if _, err := insertStmt.ExecContext(ctx, source, asset, t.Title, t.Severity, t.Description, t.Status, t.DedupeHash, t.Domain, now.AddDate(0, 0, daysToTriage), now.AddDate(0, 0, daysToRemediate)); err != nil {
				return err
			}
		}
	}
	// B. Reopens: previously patched findings that have resurfaced.
	if len(reopens) > 0 {
		updateStmt, err := tx.PrepareContext(ctx, `UPDATE tickets SET status = 'Waiting to be Triaged', patched_at = NULL, triage_due_date = ?, remediation_due_date = ? WHERE dedupe_hash = ?`)
		if err != nil {
			return err
		}
		defer updateStmt.Close()
		for _, t := range reopens {
			// Reopens fall back to the default SLAs.
			if _, err := updateStmt.ExecContext(ctx, now.AddDate(0, 0, 3), now.AddDate(0, 0, 30), t.DedupeHash); err != nil {
				return err
			}
		}
	}
	// C. Description refreshes for still-open findings.
	if len(descUpdates) > 0 {
		descStmt, err := tx.PrepareContext(ctx, `UPDATE tickets SET description = ? WHERE dedupe_hash = ?`)
		if err != nil {
			return err
		}
		defer descStmt.Close()
		for _, t := range descUpdates {
			if _, err := descStmt.ExecContext(ctx, t.Description, t.DedupeHash); err != nil {
				return err
			}
		}
	}
	// D. Auto-close findings the source stopped reporting.
	if len(autocloses) > 0 {
		closeStmt, err := tx.PrepareContext(ctx, `UPDATE tickets SET status = 'Patched', patched_at = CURRENT_TIMESTAMP WHERE dedupe_hash = ?`)
		if err != nil {
			return err
		}
		defer closeStmt.Close()
		for _, hash := range autocloses {
			if _, err := closeStmt.ExecContext(ctx, hash); err != nil {
				return err
			}
		}
	}
	return nil
}
// LogSync appends one row to the sync audit log.
func (s *SQLiteStore) LogSync(ctx context.Context, source, status string, records int, errMsg string) error {
	const insertSQL = `INSERT INTO sync_logs (source, status, records_processed, error_message) VALUES (?, ?, ?, ?)`
	_, err := s.DB.ExecContext(ctx, insertSQL, source, status, records, errMsg)
	return err
}
// GetRecentSyncLogs returns the newest `limit` sync log entries, most recent
// first.
//
// Fix: Scan errors were previously ignored (yielding zero-valued log rows)
// and rows.Err() was never checked; both now fail the call.
func (s *SQLiteStore) GetRecentSyncLogs(ctx context.Context, limit int) ([]domain2.SyncLog, error) {
	rows, err := s.DB.QueryContext(ctx, `SELECT id, source, status, records_processed, IFNULL(error_message, ''), created_at FROM sync_logs ORDER BY id DESC LIMIT ?`, limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var logs []domain2.SyncLog
	for rows.Next() {
		var l domain2.SyncLog
		if err := rows.Scan(&l.ID, &l.Source, &l.Status, &l.RecordsProcessed, &l.ErrorMessage, &l.CreatedAt); err != nil {
			return nil, err
		}
		logs = append(logs, l)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return logs, nil
}

View File

@@ -0,0 +1,131 @@
package datastore
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"time"
"epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)
// GetTickets returns up to 100 tickets with a minimal column set
// (id, title, severity, status) for simple listings.
//
// Fix: Scan errors were previously ignored, letting zero-valued tickets slip
// into the result; rows.Err() was never checked. Both now fail the call.
func (s *SQLiteStore) GetTickets(ctx context.Context) ([]domain.Ticket, error) {
	rows, err := s.DB.QueryContext(ctx, "SELECT id, title, severity, status FROM tickets LIMIT 100")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var tickets []domain.Ticket
	for rows.Next() {
		var t domain.Ticket
		if err := rows.Scan(&t.ID, &t.Title, &t.Severity, &t.Status); err != nil {
			return nil, err
		}
		tickets = append(tickets, t)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return tickets, nil
}
// CreateTicket inserts a manually created ticket, filling sane defaults for
// missing fields, computing a SHA-256 dedupe hash over the identifying
// fields, and setting 3-day triage / 14-day remediation due dates. On
// success t.ID and t.DedupeHash are populated on the caller's struct.
//
// Fix: the LastInsertId error was previously discarded, which could silently
// leave t.ID at zero; it is now returned.
func (s *SQLiteStore) CreateTicket(ctx context.Context, t *domain.Ticket) error {
	// Defaults for sparse manual input.
	if t.Status == "" {
		t.Status = "Waiting to be Triaged"
	}
	if t.Domain == "" {
		t.Domain = "Vulnerability"
	}
	if t.Source == "" {
		t.Source = "Manual"
	}
	if t.AssetIdentifier == "" {
		t.AssetIdentifier = "Default"
	}
	// Dedupe key: stable hash of source/asset/title/severity.
	rawHash := fmt.Sprintf("%s-%s-%s-%s", t.Source, t.AssetIdentifier, t.Title, t.Severity)
	hashBytes := sha256.Sum256([]byte(rawHash))
	t.DedupeHash = hex.EncodeToString(hashBytes[:])
	query := `
	INSERT INTO tickets (
		domain, source, asset_identifier, title, description, recommended_remediation,
		severity, status, dedupe_hash,
		triage_due_date, remediation_due_date, created_at, updated_at
	) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now', '+3 days'), DATETIME('now', '+14 days'), CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
	`
	res, err := s.DB.ExecContext(ctx, query,
		t.Domain, t.Source, t.AssetIdentifier, t.Title, t.Description, t.RecommendedRemediation,
		t.Severity, t.Status, t.DedupeHash,
	)
	if err != nil {
		return err
	}
	id, err := res.LastInsertId()
	if err != nil {
		return err
	}
	t.ID = int(id)
	return nil
}
// UpdateTicketInline persists a single inline edit from the UI. A non-empty
// comment replaces the flattened latest_comment, prefixed with the acting
// user; an empty comment leaves latest_comment untouched (handled by the
// CASE expression in SQL).
func (s *SQLiteStore) UpdateTicketInline(ctx context.Context, ticketID int, severity, description, remediation, comment, actor, status, assignee string) error {
	var stamped string
	if comment != "" {
		stamped = "[" + actor + "] " + comment
	}
	const updateSQL = `
	UPDATE tickets
	SET severity = ?, description = ?, recommended_remediation = ?,
	    status = ?, assignee = ?,
	    latest_comment = CASE WHEN ? != '' THEN ? ELSE latest_comment END,
	    updated_at = CURRENT_TIMESTAMP
	WHERE id = ?`
	_, err := s.DB.ExecContext(ctx, updateSQL, severity, description, remediation, status, assignee, stamped, stamped, ticketID)
	return err
}
// RejectTicketFromWrangler returns a batch of tickets to the Holding Pen:
// status becomes 'Returned to Security', the assignee is cleared, and the
// rejection reason/comment is recorded as the latest comment. All updates
// run in one transaction so a partial rejection cannot persist.
//
// Fix: the comment string is identical for every ticket, so it is now built
// once before the loop instead of being rebuilt per iteration.
func (s *SQLiteStore) RejectTicketFromWrangler(ctx context.Context, ticketIDs []int, reason, comment string) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()
	fullComment := "[Wrangler Reject: " + reason + "] " + comment
	for _, id := range ticketIDs {
		_, err := tx.ExecContext(ctx, "UPDATE tickets SET status = 'Returned to Security', assignee = 'Unassigned', latest_comment = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?", fullComment, id)
		if err != nil {
			return err
		}
	}
	return tx.Commit()
}
// GetTicketByID loads a single ticket with its full column set, converting
// the stored date strings into time.Time values.
//
// NOTE(review): the date columns are parsed as RFC 3339 with errors
// discarded. Rows written via SQLite's CURRENT_TIMESTAMP / DATETIME()
// ("2006-01-02 15:04:05") would not match that layout and would silently
// yield zero times — confirm the driver's actual timestamp format.
func (s *SQLiteStore) GetTicketByID(ctx context.Context, id int) (domain.Ticket, error) {
	var t domain.Ticket
	// Dates come back as raw strings; patched_at is nullable.
	var triageDue, remDue, created, updated string
	var patchedAt *string
	query := `SELECT id, domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash, triage_due_date, remediation_due_date, created_at, updated_at, patched_at, assignee, latest_comment FROM tickets WHERE id = ?`
	err := s.DB.QueryRowContext(ctx, query, id).Scan(
		&t.ID, &t.Domain, &t.Source, &t.AssetIdentifier, &t.Title, &t.Description, &t.RecommendedRemediation, &t.Severity, &t.Status, &t.DedupeHash, &triageDue, &remDue, &created, &updated, &patchedAt, &t.Assignee, &t.LatestComment,
	)
	if err != nil {
		return t, err
	}
	// Best-effort parses: a failure leaves the zero time.Time.
	t.TriageDueDate, _ = time.Parse(time.RFC3339, triageDue)
	t.RemediationDueDate, _ = time.Parse(time.RFC3339, remDue)
	t.CreatedAt, _ = time.Parse(time.RFC3339, created)
	t.UpdatedAt, _ = time.Parse(time.RFC3339, updated)
	if patchedAt != nil {
		pTime, _ := time.Parse(time.RFC3339, *patchedAt)
		t.PatchedAt = &pTime
	}
	return t, nil
}