diff --git a/.gitea/workflows/release.yaml b/.gitea/workflows/release.yaml new file mode 100644 index 0000000..6ab7fac --- /dev/null +++ b/.gitea/workflows/release.yaml @@ -0,0 +1,53 @@ +name: Build and Release Core + +on: + push: + tags: + - 'v*' # Only trigger when you push a version tag (e.g., v1.0.0) + pull_request: + branches: [ main ] # Run tests on PRs, but don't build release binaries + +jobs: + build: + name: Test and Build + runs-on: ubuntu-latest # This runs on your Gitea act_runner + steps: + - name: Checkout Code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version: '1.26' # Update to match your go.mod if different + + - name: Run Tests + run: go test ./... -v + + - name: Build Binaries + # Only run the build steps if this was triggered by a tag push + if: startsWith(github.ref, 'refs/tags/') + run: | + + VERSION=${GITHUB_REF_NAME} + COMMIT=$(echo ${GITHUB_SHA} | cut -c1-7) + + + LDFLAGS="-X 'main.BuildVersion=$VERSION' -X 'main.BuildCommit=$COMMIT'" + + mkdir -p bin + + echo "Building Linux (amd64)..." + GOOS=linux GOARCH=amd64 go build -ldflags="$LDFLAGS" -o bin/rr-linux-amd64 ./cmd/rr/main.go + + echo "Building macOS (Apple Silicon arm64)..." + GOOS=darwin GOARCH=arm64 go build -ldflags="$LDFLAGS" -o bin/rr-darwin-arm64 ./cmd/rr/main.go + + echo "Building Windows (amd64)..." 
+ GOOS=windows GOARCH=amd64 go build -ldflags="$LDFLAGS" -o bin/rr-windows-amd64.exe ./cmd/rr/main.go + + - name: Upload Artifacts + if: startsWith(github.ref, 'refs/tags/') + uses: actions/upload-artifact@v4 + with: + name: riskrancher-core-binaries + path: bin/ \ No newline at end of file diff --git a/.gitignore b/.gitignore index 5b90e79..a57b3bb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,27 +1,59 @@ -# ---> Go -# If you prefer the allow list template instead of the deny list, see community template: -# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore -# -# Binaries for programs and plugins +# ========================= +# Go Standard +# ========================= *.exe *.exe~ *.dll *.so *.dylib - -# Test binary, built with `go test -c` *.test - -# Output of the go coverage tool, specifically when used with LiteIDE *.out - -# Dependency directories (remove the comment below to include it) -# vendor/ - -# Go workspace file go.work go.work.sum +vendor/ -# env file +# ========================= +# RiskRancher Compiled Binaries +# ========================= +# Ignore the local builds so you don't accidentally push a 20MB executable +rr +rr.exe + +# ========================= +# Runtime Data & Uploads (CRITICAL) +# ========================= +# Ignore all runtime databases and uploaded pentest reports/images +/data/* +!/data/.keep +/backups/* +!/backups/.keep + +# SQLite temporary and journal files +*.db +*.db-shm +*.db-wal +*.sqlite +*.sqlite3 + +# ========================= +# Environment & Secrets +# ========================= .env +.env.* +!.env.example +# ========================= +# IDEs & OS Files +# ========================= +# GoLand / IntelliJ +.idea/ +*.iml + +# macOS +.DS_Store +.AppleDouble +.LSOverride + +# Windows +Thumbs.db +ehthumbs.db \ No newline at end of file diff --git a/README.md b/README.md index c9650d6..e77de0f 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,24 @@ -# core +# ๐ด RiskRancher Core (Community 
Edition) -Open-source Risk-Based Vulnerability Management (RBVM). A high-performance, air-gapped single binary for finding ingestion, deduplication, and triage. \ No newline at end of file +> So simple your manager could deploy it. + +RiskRancher Core is an open-source **Risk-Based Vulnerability Management (RBVM)** and **ASPM** platform built for modern DevSecOps teams. Compiled as a lightning-fast, **air-gapped single Go binary** with an embedded SQLite database, it ingests, deduplicates, and routes millions of security findings from your CI/CD pipelines and scanners. + +No external databases to spin up, no Docker swarms to manage, and zero complex microservices. Just drop the binary on a server and start triaging. + +## ๐ Getting Started + +### Option A: Download the Binary (Recommended) +1. Go to the [Releases](#) tab and download the compiled executable for your OS (Windows/macOS/Linux). +2. Place the binary in a dedicated directory and execute it. +3. Visit `http://localhost:8080` in your browser. + +### Option B: Compile from Source +Ensure you have **Go 1.26+** installed (*CGO is required for the native `mattn/go-sqlite3` driver*). 
+ +```bash +git clone https://gitea.yourdomain.com/RiskRancher/core.git +cd core +go build -o rr ./cmd/rr/main.go +./rr +``` \ No newline at end of file diff --git a/backups/.keep b/backups/.keep new file mode 100644 index 0000000..e69de29 diff --git a/cmd/stresstest/main.go b/cmd/stresstest/main.go new file mode 100644 index 0000000..44cc01b --- /dev/null +++ b/cmd/stresstest/main.go @@ -0,0 +1,166 @@ +package main + +import ( + "bytes" + "database/sql" + "encoding/json" + "flag" + "fmt" + "log" + "net/http" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/auth" + "epigas.gitea.cloud/RiskRancher/core/pkg/datastore" +) + +func main() { + sizeFlag := flag.String("size", "small", "Choose 'small' (100 tickets) or 'large' (300,000 tickets)") + flag.Parse() + + totalTickets := 100 + batchSize := 100 + + if *sizeFlag == "large" { + totalTickets = 300000 + batchSize = 10000 // Ingest in chunks of 10k + } + + db := datastore.InitDB("./data/RiskRancher.db") + defer db.Close() + + log.Printf("๐งน Sweeping the ranch (Deleting old test data)...") + + db.Exec("DELETE FROM ticket_assignments") + db.Exec("DELETE FROM tickets") + db.Exec("DELETE FROM sync_logs") + db.Exec("DELETE FROM draft_tickets") + + // Reset the auto-increment counters so Ticket IDs reliably start at 1 + db.Exec("DELETE FROM sqlite_sequence") + + log.Println("โ๏ธ Seeding global config, adapters, and SLA matrix...") + + db.Exec("INSERT OR IGNORE INTO app_config (id, timezone, business_start, business_end, default_extension_days) VALUES (1, 'America/New_York', 9, 17, 30)") + db.Exec("INSERT OR IGNORE INTO domains (name) VALUES ('Vulnerability'), ('Privacy'), ('Compliance'), ('Incident')") + db.Exec("INSERT OR IGNORE INTO departments (name) VALUES ('Security'), ('IT'), ('Privacy'), ('Legal'), ('Compliance')") + + slaQuery := `INSERT OR IGNORE INTO sla_policies (domain, severity, days_to_triage, days_to_remediate, max_extensions) VALUES + ('Vulnerability', 
'Critical', 1, 3, 0), ('Vulnerability', 'High', 3, 14, 1), ('Vulnerability', 'Medium', 5, 30, 2), ('Vulnerability', 'Low', 8, 90, 3), ('Vulnerability', 'Info', 0, 0, 0)` + db.Exec(slaQuery) + + adapterQuery := `INSERT OR IGNORE INTO data_adapters (name, source_name, findings_path, mapping_title, mapping_asset, mapping_severity) VALUES + ('Trivy Container Security', 'Trivy', '.', 'title', 'asset', 'severity'), + ('GitHub Dependabot', 'Dependabot', '.', 'title', 'asset', 'severity'), + ('Tenable Nessus', 'Nessus', '.', 'title', 'asset', 'severity'), + ('Manual Entry API', 'Manual', '.', 'title', 'asset', 'severity')` + db.Exec(adapterQuery) + + validHash, _ := auth.HashPassword("password123") + + _, err := db.Exec("INSERT OR REPLACE INTO users (id, email, full_name, password_hash, global_role, is_active) VALUES (999, 'stress@ranch.com', 'Stress Tester', ?, 'Sheriff', 1)", validHash) + if err != nil { + log.Fatalf("๐จ Failed to seed Stress User (Database locked?): %v", err) + } + + _, err = db.Exec("INSERT OR REPLACE INTO sessions (session_token, user_id, expires_at) VALUES ('stress_token_123', 999, datetime('now', '+1 hour'))") + if err != nil { + log.Fatalf("๐จ Failed to seed Stress Session: %v", err) + } + + log.Println("==========================================================================") + log.Printf("๐ COMMENCING %d TICKET API LOAD TEST (%s mode)", totalTickets, *sizeFlag) + log.Println("โ ๏ธ CRITICAL: Ensure your RiskRancher server is running in another terminal!") + log.Println("==========================================================================") + time.Sleep(1 * time.Second) + + client := &http.Client{Timeout: 5 * time.Minute} + baseURL := "http://localhost:8080" + sessionCookie := &http.Cookie{Name: "session_token", Value: "stress_token_123"} + + ticketCounter := 1 + + log.Printf("๐ฅ PHASE 1: Ingesting via API in batches of %d...", batchSize) + for b := 0; b < totalTickets/batchSize; b++ { + var payload []map[string]string + + for i := 0; i < 
batchSize; i++ { + assetName := fmt.Sprintf("server-prod-%05d", (ticketCounter%50)+1) + + sev := "Medium" + if ticketCounter%10 == 0 { + sev = "Critical" + } else if ticketCounter%5 == 0 { + sev = "High" + } else if ticketCounter%2 == 0 { + sev = "Low" + } + + source := "Trivy" + if ticketCounter%3 == 0 { + source = "Dependabot" + } else if ticketCounter%7 == 0 { + source = "Nessus" + } + + payload = append(payload, map[string]string{ + "source": source, + "asset_identifier": assetName, + "title": fmt.Sprintf("Vulnerability-%06d", ticketCounter), + "severity": sev, + "description": fmt.Sprintf("Stress test vulnerability payload #%d", ticketCounter), + }) + ticketCounter++ + } + + body, _ := json.Marshal(payload) + req, _ := http.NewRequest(http.MethodPost, baseURL+"/api/ingest", bytes.NewBuffer(body)) + req.AddCookie(sessionCookie) + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + log.Fatalf("๐จ API Request failed: %v", err) + } + if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK { + log.Fatalf("๐จ API returned unexpected status: %d", resp.StatusCode) + } + resp.Body.Close() + fmt.Printf("โ Ingested batch %d/%d (%d tickets)\n", b+1, totalTickets/batchSize, len(payload)) + } + + log.Println("\n๐ PHASE 2: Distributing tickets to valid Core workflows...") + + unassignedEnd := int(float64(totalTickets) * 0.60) // 60% stay in Holding Pen + assignedEnd := int(float64(totalTickets) * 0.75) // 15% go to Chute + returnedEnd := int(float64(totalTickets) * 0.85) // 10% Returned to Security + falsePosEnd := int(float64(totalTickets) * 0.90) // 5% False Positive + patchedEnd := totalTickets // 10% Patched + + log.Printf("โณ Keeping Tickets 1 - %d in the Holding Pen (Unassigned)...", unassignedEnd) + bulkUpdateDB(db, unassignedEnd+1, assignedEnd, "Assigned Out", "it-network@ranch.com") + bulkUpdateDB(db, assignedEnd+1, returnedEnd, "Returned to Security", "it-endpoint@ranch.com") + bulkUpdateDB(db, 
returnedEnd+1, falsePosEnd, "False Positive", "security@ranch.com") + bulkUpdateDB(db, falsePosEnd+1, patchedEnd, "Patched", "it-network@ranch.com") + + log.Println("\n๐ STRESS TEST COMPLETE!") + log.Println("==========================================================================") + log.Println("๐ค The ranch is fully loaded with Core data. Go check the Dashboard!") + log.Println("๐ Login -> Email: stress@ranch.com | Password: password123") + log.Println("==========================================================================") +} + +// bulkUpdateDB executes direct SQLite updates +func bulkUpdateDB(db *sql.DB, startID, endID int, status, assignee string) { + if startID > endID { + return + } + fmt.Printf("Moving %d tickets (%d to %d) -> %s...\n", (endID-startID)+1, startID, endID, status) + + query := `UPDATE tickets SET status = ?, assignee = ?, latest_comment = 'Stress test auto-distribution', updated_at = CURRENT_TIMESTAMP WHERE id >= ? AND id <= ?` + + _, err := db.Exec(query, status, assignee, startID, endID) + if err != nil { + log.Fatalf("๐จ DB update failed: %v", err) + } +} diff --git a/data/.keep b/data/.keep new file mode 100644 index 0000000..e69de29 diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..f9d9566 --- /dev/null +++ b/go.mod @@ -0,0 +1,8 @@ +module epigas.gitea.cloud/RiskRancher/core + +go 1.26.0 + +require ( + github.com/mattn/go-sqlite3 v1.14.34 + golang.org/x/crypto v0.48.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..70d63c0 --- /dev/null +++ b/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-sqlite3 v1.14.34 h1:3NtcvcUnFBPsuRcno8pUtupspG/GM+9nZ88zgJcp6Zk= +github.com/mattn/go-sqlite3 v1.14.34/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= diff --git a/pkg/adapters/adapters.go b/pkg/adapters/adapters.go new file mode 100644 index 
0000000..9ce4c76 --- /dev/null +++ b/pkg/adapters/adapters.go @@ -0,0 +1,147 @@ +package adapters + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "log" + "net/http" + "strconv" + "strings" + + domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func (h *Handler) HandleGetAdapters(w http.ResponseWriter, r *http.Request) { + adapters, err := h.Store.GetAdapters(r.Context()) + if err != nil { + http.Error(w, "Database error", http.StatusInternalServerError) + return + } + json.NewEncoder(w).Encode(adapters) +} + +func (h *Handler) HandleCreateAdapter(w http.ResponseWriter, r *http.Request) { + var adapter domain2.Adapter + if err := json.NewDecoder(r.Body).Decode(&adapter); err != nil { + http.Error(w, "Invalid JSON", http.StatusBadRequest) + return + } + if err := h.Store.SaveAdapter(r.Context(), adapter); err != nil { + http.Error(w, "Failed to save adapter", http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusCreated) +} + +func (h *Handler) HandleDeleteAdapter(w http.ResponseWriter, r *http.Request) { + idStr := r.PathValue("id") + id, err := strconv.Atoi(idStr) + if err != nil { + http.Error(w, "Invalid adapter ID", http.StatusBadRequest) + return + } + + if err := h.Store.DeleteAdapter(r.Context(), id); err != nil { + http.Error(w, "Failed to delete adapter", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusNoContent) +} + +func getJSONValue(data interface{}, path string) interface{} { + if path == "" || path == "." 
{ + return data // The root IS the array + } + keys := strings.Split(path, ".") + current := data + for _, key := range keys { + if m, ok := current.(map[string]interface{}); ok { + current = m[key] + } else { + return nil // Path broke + } + } + return current +} + +func interfaceToString(val interface{}) string { + if val == nil { + return "" + } + if str, ok := val.(string); ok { + return str + } + return "" // Could expand this to handle ints/floats if needed +} + +// HandleAdapterIngest dynamically maps deeply nested JSON arrays into Tickets +func (h *Handler) HandleAdapterIngest(w http.ResponseWriter, r *http.Request) { + adapterName := r.PathValue("name") + adapter, err := h.Store.GetAdapterByName(r.Context(), adapterName) + if err != nil { + http.Error(w, "Adapter not found", http.StatusNotFound) + return + } + + var rawData interface{} + if err := json.NewDecoder(r.Body).Decode(&rawData); err != nil { + http.Error(w, "Invalid JSON payload", http.StatusBadRequest) + return + } + + findingsNode := getJSONValue(rawData, adapter.FindingsPath) + findingsArray, ok := findingsNode.([]interface{}) + if !ok { + http.Error(w, "Findings path did not resolve to a JSON array", http.StatusBadRequest) + return + } + + type groupKey struct { + Source string + Asset string + } + groupedTickets := make(map[groupKey][]domain2.Ticket) + + for _, item := range findingsArray { + finding, ok := item.(map[string]interface{}) + if !ok { + continue + } + + ticket := domain2.Ticket{ + Source: adapter.SourceName, + Status: "Waiting to be Triaged", // Explicitly set status + Title: interfaceToString(finding[adapter.MappingTitle]), + AssetIdentifier: interfaceToString(finding[adapter.MappingAsset]), + Severity: interfaceToString(finding[adapter.MappingSeverity]), + Description: interfaceToString(finding[adapter.MappingDescription]), + RecommendedRemediation: interfaceToString(finding[adapter.MappingRemediation]), + } + + if ticket.Title != "" && ticket.AssetIdentifier != "" { + 
hashInput := ticket.Source + "|" + ticket.AssetIdentifier + "|" + ticket.Title + hash := sha256.Sum256([]byte(hashInput)) + ticket.DedupeHash = hex.EncodeToString(hash[:]) + key := groupKey{Source: ticket.Source, Asset: ticket.AssetIdentifier} + groupedTickets[key] = append(groupedTickets[key], ticket) + } + } + + for key, batch := range groupedTickets { + err := h.Store.ProcessIngestionBatch(r.Context(), key.Source, key.Asset, batch) + if err != nil { + log.Printf("๐ฅ JSON Ingestion Error for Asset %s: %v", key.Asset, err) + // ๐ LOG THE BATCH FAILURE + h.Store.LogSync(r.Context(), key.Source, "Failed", len(batch), err.Error()) + http.Error(w, "Database error processing JSON batch", http.StatusInternalServerError) + return + } else { + // ๐ LOG THE SUCCESS + h.Store.LogSync(r.Context(), key.Source, "Success", len(batch), "") + } + } + + w.WriteHeader(http.StatusCreated) +} diff --git a/pkg/adapters/adapters_test.go b/pkg/adapters/adapters_test.go new file mode 100644 index 0000000..2351019 --- /dev/null +++ b/pkg/adapters/adapters_test.go @@ -0,0 +1,142 @@ +package adapters + +import ( + "bytes" + "context" + "database/sql" + "net/http" + "net/http/httptest" + "testing" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/datastore" + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func setupTestAdapters(t *testing.T) (*Handler, *sql.DB) { + db := datastore.InitDB(":memory:") + store := datastore.NewSQLiteStore(db) + return NewHandler(store), db +} + +func GetVIPCookie(store domain.Store) *http.Cookie { + user, err := store.GetUserByEmail(context.Background(), "vip@RiskRancher.com") + if err != nil { + user, _ = store.CreateUser(context.Background(), "vip@RiskRancher.com", "Test VIP", "hash", "Sheriff") + } + + store.CreateSession(context.Background(), "vip_token_999", user.ID, time.Now().Add(1*time.Hour)) + return &http.Cookie{Name: "session_token", Value: "vip_token_999"} +} + +func TestHandleAdapterIngest(t *testing.T) { + h, db := setupTestAdapters(t) 
+ defer db.Close() + + adapterPayload := []byte(`{"name": "Trivy Test", "source_name": "TrivyScanner", "findings_path": "Results", "mapping_title": "VulnerabilityID", "mapping_asset": "Target", "mapping_severity": "Severity"}`) + reqAdapter := httptest.NewRequest(http.MethodPost, "/api/adapters", bytes.NewBuffer(adapterPayload)) + reqAdapter.AddCookie(GetVIPCookie(h.Store)) + reqAdapter.Header.Set("Content-Type", "application/json") + rrAdapter := httptest.NewRecorder() + + h.HandleCreateAdapter(rrAdapter, reqAdapter) + + payload := []byte(`{"SchemaVersion": 2, "Results": [{"VulnerabilityID": "CVE-1", "Target": "A", "Severity": "HIGH"}]}`) + req := httptest.NewRequest(http.MethodPost, "/api/ingest/Trivy%20Test", bytes.NewBuffer(payload)) + req.AddCookie(GetVIPCookie(h.Store)) + req.Header.Set("Content-Type", "application/json") + + req.SetPathValue("name", "Trivy Test") + rr := httptest.NewRecorder() + h.HandleAdapterIngest(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("Expected 201 Created, got %d", rr.Code) + } +} + +func TestGetAdapters(t *testing.T) { + h, db := setupTestAdapters(t) + defer db.Close() + + db.Exec(`INSERT INTO data_adapters (name, source_name, findings_path, mapping_title, mapping_asset, mapping_severity) VALUES ('Trivy Test', 'Trivy', 'Results', 'VulnerabilityID', 'PkgName', 'Severity')`) + + req := httptest.NewRequest(http.MethodGet, "/api/adapters", nil) + req.AddCookie(GetVIPCookie(h.Store)) + rr := httptest.NewRecorder() + h.HandleGetAdapters(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d", rr.Code) + } +} + +func TestCreateAdapter(t *testing.T) { + h, db := setupTestAdapters(t) + defer db.Close() + + payload := []byte(`{"name": "AcmeSec", "source_name": "Acme", "findings_path": "issues", "mapping_title": "t", "mapping_asset": "a", "mapping_severity": "s"}`) + req := httptest.NewRequest(http.MethodPost, "/api/adapters", bytes.NewBuffer(payload)) + req.AddCookie(GetVIPCookie(h.Store)) + 
req.Header.Set("Content-Type", "application/json") + rr := httptest.NewRecorder() + + h.HandleCreateAdapter(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("Expected 201 Created, got %d", rr.Code) + } +} + +func TestJSONIngestion(t *testing.T) { + h, db := setupTestAdapters(t) + defer db.Close() + + _, err := db.Exec(` + INSERT INTO data_adapters ( + id, name, source_name, findings_path, + mapping_title, mapping_asset, mapping_severity + ) VALUES ( + 998, 'NestedScanner', 'DeepScan', 'scan_data.results', + 'vuln_name', 'target_ip', 'risk_level' + ) + `) + if err != nil { + t.Fatalf("Failed to setup nested adapter: %v", err) + } + + payload := []byte(`{ + "metadata": { "version": "1.0" }, + "scan_data": { + "results": [ + { + "vuln_name": "Log4j RCE", + "target_ip": "10.0.0.5", + "risk_level": "Critical" + } + ] + } + }`) + + req := httptest.NewRequest(http.MethodPost, "/api/ingest/NestedScanner", bytes.NewBuffer(payload)) + req.Header.Set("Content-Type", "application/json") + req.AddCookie(GetVIPCookie(h.Store)) + + req.SetPathValue("name", "NestedScanner") + rr := httptest.NewRecorder() + h.HandleAdapterIngest(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("Expected 201 Created, got %d. Body: %s", rr.Code, rr.Body.String()) + } + + var title, severity string + err = db.QueryRow("SELECT title, severity FROM tickets WHERE source = 'DeepScan'").Scan(&title, &severity) + if err != nil { + t.Fatalf("Failed to query ingested ticket: %v", err) + } + + if title != "Log4j RCE" || severity != "Critical" { + t.Errorf("JSON Mapping failed! 
Expected 'Log4j RCE' / 'Critical', got '%s' / '%s'", title, severity) + } +} diff --git a/pkg/adapters/handler.go b/pkg/adapters/handler.go new file mode 100644 index 0000000..8097ec6 --- /dev/null +++ b/pkg/adapters/handler.go @@ -0,0 +1,13 @@ +package adapters + +import ( + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +type Handler struct { + Store domain.Store +} + +func NewHandler(store domain.Store) *Handler { + return &Handler{Store: store} +} diff --git a/pkg/admin/admin.go b/pkg/admin/admin.go new file mode 100644 index 0000000..2397af5 --- /dev/null +++ b/pkg/admin/admin.go @@ -0,0 +1,62 @@ +package admin + +import ( + "encoding/json" + "net/http" + "strconv" +) + +func (h *Handler) HandleGetConfig(w http.ResponseWriter, r *http.Request) { + config, err := h.Store.GetAppConfig(r.Context()) + if err != nil { + http.Error(w, "Failed to fetch configuration", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(config) +} + +func (h *Handler) HandleExportState(w http.ResponseWriter, r *http.Request) { + state, err := h.Store.ExportSystemState(r.Context()) + if err != nil { + http.Error(w, "Failed to generate system export", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Disposition", "attachment; filename=RiskRancher_export.json") + w.WriteHeader(http.StatusOK) + + if err := json.NewEncoder(w).Encode(state); err != nil { + // Note: We can't change the HTTP status code here because we've already started streaming, + // but we can log the error if the stream breaks. 
+ _ = err + } +} + +func (h *Handler) HandleGetLogs(w http.ResponseWriter, r *http.Request) { + filter := r.URL.Query().Get("filter") + page, err := strconv.Atoi(r.URL.Query().Get("page")) + if err != nil || page < 1 { + page = 1 + } + + limit := 15 + offset := (page - 1) * limit + + feed, total, err := h.Store.GetPaginatedActivityFeed(r.Context(), filter, limit, offset) + if err != nil { + http.Error(w, "Failed to load logs", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]any{ + "feed": feed, + "total": total, + "page": page, + "limit": limit, + }) +} diff --git a/pkg/admin/admin_handlers.go b/pkg/admin/admin_handlers.go new file mode 100644 index 0000000..de3f462 --- /dev/null +++ b/pkg/admin/admin_handlers.go @@ -0,0 +1,192 @@ +package admin + +import ( + "encoding/json" + "net/http" + "strconv" + "strings" + + "epigas.gitea.cloud/RiskRancher/core/pkg/auth" +) + +// PasswordResetRequest is the expected JSON payload +type PasswordResetRequest struct { + NewPassword string `json:"new_password"` +} + +// HandleAdminResetPassword allows a Sheriff to forcefully overwrite a user's password. 
+func (h *Handler) HandleAdminResetPassword(w http.ResponseWriter, r *http.Request) { + idStr := r.PathValue("id") + userID, err := strconv.Atoi(idStr) + if err != nil { + http.Error(w, "Invalid user ID in URL", http.StatusBadRequest) + return + } + + var req PasswordResetRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON payload", http.StatusBadRequest) + return + } + + if req.NewPassword == "" { + http.Error(w, "New password cannot be empty", http.StatusBadRequest) + return + } + + hashedPassword, err := auth.HashPassword(req.NewPassword) + if err != nil { + http.Error(w, "Internal server error during hashing", http.StatusInternalServerError) + return + } + + err = h.Store.UpdateUserPassword(r.Context(), userID, hashedPassword) + if err != nil { + http.Error(w, "Failed to update user password", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{ + "message": "Password reset successfully", + }) +} + +type RoleUpdateRequest struct { + GlobalRole string `json:"global_role"` +} + +// HandleUpdateUserRole allows a Sheriff to promote or demote a user. 
+func (h *Handler) HandleUpdateUserRole(w http.ResponseWriter, r *http.Request) { + idStr := r.PathValue("id") + userID, err := strconv.Atoi(idStr) + if err != nil { + http.Error(w, "Invalid user ID in URL", http.StatusBadRequest) + return + } + var req RoleUpdateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON payload", http.StatusBadRequest) + return + } + + validRoles := map[string]bool{ + "Sheriff": true, "Wrangler": true, "RangeHand": true, "CircuitRider": true, "Magistrate": true, + } + if !validRoles[req.GlobalRole] { + http.Error(w, "Invalid role provided", http.StatusBadRequest) + return + } + + err = h.Store.UpdateUserRole(r.Context(), userID, req.GlobalRole) + if err != nil { + http.Error(w, "Failed to update user role", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{ + "message": "User role updated successfully to " + req.GlobalRole, + }) +} + +// HandleDeactivateUser allows a Sheriff to safely offboard a user. +func (h *Handler) HandleDeactivateUser(w http.ResponseWriter, r *http.Request) { + idStr := r.PathValue("id") + userID, err := strconv.Atoi(idStr) + if err != nil { + http.Error(w, "Invalid user ID in URL", http.StatusBadRequest) + return + } + + err = h.Store.DeactivateUserAndReassign(r.Context(), userID) + if err != nil { + http.Error(w, "Failed to deactivate user", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{ + "message": "User successfully deactivated and tickets reassigned.", + }) +} + +// CreateUserRequest is the payload the Sheriff sends to invite a new user +type CreateUserRequest struct { + Email string `json:"email"` + FullName string `json:"full_name"` + Password string `json:"password"` + GlobalRole string `json:"global_role"` +} + +// HandleCreateUser allows a Sheriff to manually provision a new user account. 
+func (h *Handler) HandleCreateUser(w http.ResponseWriter, r *http.Request) { + var req CreateUserRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON payload", http.StatusBadRequest) + return + } + + if req.Email == "" || req.FullName == "" || req.Password == "" || req.GlobalRole == "" { + http.Error(w, "Missing required fields", http.StatusBadRequest) + return + } + + validRoles := map[string]bool{ + "Sheriff": true, "Wrangler": true, "RangeHand": true, "CircuitRider": true, "Magistrate": true, + } + if !validRoles[req.GlobalRole] { + http.Error(w, "Invalid role provided", http.StatusBadRequest) + return + } + + hashedPassword, err := auth.HashPassword(req.Password) + if err != nil { + http.Error(w, "Internal server error during hashing", http.StatusInternalServerError) + return + } + + user, err := h.Store.CreateUser(r.Context(), req.Email, req.FullName, hashedPassword, req.GlobalRole) + if err != nil { + if strings.Contains(err.Error(), "UNIQUE constraint failed") { + http.Error(w, "Email already exists in the system", http.StatusConflict) + return + } + http.Error(w, "Failed to provision user", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(map[string]interface{}{ + "message": "User provisioned successfully. Share the temporary password securely.", + "id": user.ID, + "email": user.Email, + "full_name": user.FullName, + "global_role": user.GlobalRole, + }) +} + +// HandleGetUsers returns a list of all users in the system for the Sheriff to manage. 
+func (h *Handler) HandleGetUsers(w http.ResponseWriter, r *http.Request) { + users, err := h.Store.GetAllUsers(r.Context()) + if err != nil { + http.Error(w, "Failed to fetch user roster", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(users) +} + +// HandleGetWranglers returns a clean list of IT users for assignment dropdowns +func (h *Handler) HandleGetWranglers(w http.ResponseWriter, r *http.Request) { + wranglers, err := h.Store.GetWranglers(r.Context()) + if err != nil { + http.Error(w, "Failed to fetch wranglers", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(wranglers) +} diff --git a/pkg/admin/admin_lifecycle.go b/pkg/admin/admin_lifecycle.go new file mode 100644 index 0000000..06786cb --- /dev/null +++ b/pkg/admin/admin_lifecycle.go @@ -0,0 +1,69 @@ +package admin + +import ( + "encoding/json" + "net/http" + "time" +) + +const CurrentAppVersion = "v1.0.0" + +type UpdateCheckResponse struct { + Status string `json:"status"` + CurrentVersion string `json:"current_version"` + LatestVersion string `json:"latest_version,omitempty"` + UpdateAvailable bool `json:"update_available"` + Message string `json:"message"` +} + +// HandleCheckUpdates pings gitea. If air-gapped, it returns manual instructions. +func (h *Handler) HandleCheckUpdates(w http.ResponseWriter, r *http.Request) { + respPayload := UpdateCheckResponse{ + CurrentVersion: CurrentAppVersion, + } + + client := http.Client{Timeout: 3 * time.Second} + + giteaURL := "https://epigas.gitea.cloud/api/v1/repos/RiskRancher/core/releases/latest" + resp, err := client.Get(giteaURL) + + if err != nil || resp.StatusCode != http.StatusOK { + respPayload.Status = "offline" + respPayload.Message = "No internet connection detected. 
To update an air-gapped server: Download the latest RiskRancher binary on a connected machine, transfer it via rsync or scp to this server, and restart the service." + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(respPayload) + return + } + defer resp.Body.Close() + + var ghRelease struct { + TagName string `json:"tag_name"` + } + if err := json.NewDecoder(resp.Body).Decode(&ghRelease); err == nil { + respPayload.Status = "online" + respPayload.LatestVersion = ghRelease.TagName + respPayload.UpdateAvailable = (ghRelease.TagName != CurrentAppVersion) + + if respPayload.UpdateAvailable { + respPayload.Message = "A new version is available! Please trigger a graceful shutdown and swap the binary." + } else { + respPayload.Message = "You are running the latest version." + } + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(respPayload) +} + +// HandleShutdown signals the application to close connections and exit cleanly +func (h *Handler) HandleShutdown(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"message": "Initiating graceful shutdown. 
The server will exit in 2 seconds..."}`)) + go func() { + // NOTE(review): this goroutine only sleeps — nothing here actually stops the server, + // so the promised shutdown never happens. + // TODO: call os.Exit(0) after the delay (requires importing "os"), or signal the + // main goroutine (e.g. cancel a context / close a channel) so http.Server.Shutdown runs. + time.Sleep(2 * time.Second) + }() +} diff --git a/pkg/admin/admin_test.go b/pkg/admin/admin_test.go new file mode 100644 index 0000000..4cd7337 --- /dev/null +++ b/pkg/admin/admin_test.go @@ -0,0 +1,64 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func TestGetGlobalConfig(t *testing.T) { + app, db := setupTestAdmin(t) + defer db.Close() + + req := httptest.NewRequest(http.MethodGet, "/api/config", nil) + req.AddCookie(GetVIPCookie(app.Store)) + rr := httptest.NewRecorder() + + app.HandleGetConfig(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d. Body: %s", rr.Code, rr.Body.String()) + } + + var config domain.AppConfig + if err := json.NewDecoder(rr.Body).Decode(&config); err != nil { + t.Fatalf("Failed to decode response: %v", err) + } + + if config.Timezone != "America/New_York" || config.BusinessStart != 9 { + t.Errorf("Expected default config, got TZ: %s, Start: %d", config.Timezone, config.BusinessStart) + } +} + +func TestHandleDeactivateUser(t *testing.T) { + h, db := setupTestAdmin(t) + defer db.Close() + + targetUser, _ := h.Store.CreateUser(context.Background(), "fired@ranch.com", "Fired Fred", "hash", "RangeHand") + res, _ := db.Exec(`INSERT INTO tickets (title, status, severity, source, dedupe_hash) VALUES ('Freds Task', 'Waiting to be Triaged', 'High', 'Manual', 'fake-hash-123')`) + ticketID, _ := res.LastInsertId() + db.Exec(`INSERT INTO ticket_assignments (ticket_id, assignee, role) VALUES (?, 'fired@ranch.com', 'RangeHand')`, ticketID) + + targetURL := fmt.Sprintf("/api/admin/users/%d", targetUser.ID) + req := httptest.NewRequest(http.MethodDelete, targetURL, nil) + req.AddCookie(GetVIPCookie(h.Store)) + req.SetPathValue("id", fmt.Sprintf("%d", targetUser.ID)) + rr := httptest.NewRecorder() + + h.HandleDeactivateUser(rr, req) + + if 
rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d. Body: %s", rr.Code, rr.Body.String()) + } + + var count int + db.QueryRow(`SELECT COUNT(*) FROM ticket_assignments WHERE assignee = 'fired@ranch.com'`).Scan(&count) + if count != 0 { + t.Errorf("Expected assignments to be cleared, but found %d", count) + } +} diff --git a/pkg/admin/admin_users_test.go b/pkg/admin/admin_users_test.go new file mode 100644 index 0000000..8068640 --- /dev/null +++ b/pkg/admin/admin_users_test.go @@ -0,0 +1,106 @@ +package admin + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +func TestHandleAdminResetPassword(t *testing.T) { + a, db := setupTestAdmin(t) + defer db.Close() + + targetUser, _ := a.Store.CreateUser(context.Background(), "forgetful@ranch.com", "Forgetful Fred", "old_hash", "RangeHand") + + payload := map[string]string{ + "new_password": "BrandNewSecurePassword123!", + } + body, _ := json.Marshal(payload) + + targetURL := fmt.Sprintf("/api/admin/users/%d/reset-password", targetUser.ID) + req := httptest.NewRequest(http.MethodPatch, targetURL, bytes.NewBuffer(body)) + + req.SetPathValue("id", fmt.Sprintf("%d", targetUser.ID)) + req.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + a.HandleAdminResetPassword(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d. 
Body: %s", rr.Code, rr.Body.String()) + } +} + +func TestHandleUpdateUserRole(t *testing.T) { + a, db := setupTestAdmin(t) + defer db.Close() + + _, _ = a.Store.CreateUser(context.Background(), "boss@ranch.com", "The Boss", "hash", "Sheriff") + targetUser, _ := a.Store.CreateUser(context.Background(), "rookie@ranch.com", "Rookie Ray", "hash", "RangeHand") + + payload := map[string]string{ + "global_role": "Wrangler", + } + body, _ := json.Marshal(payload) + + targetURL := fmt.Sprintf("/api/admin/users/%d/role", targetUser.ID) + req := httptest.NewRequest(http.MethodPatch, targetURL, bytes.NewBuffer(body)) + + req.AddCookie(GetVIPCookie(a.Store)) + req.SetPathValue("id", fmt.Sprintf("%d", targetUser.ID)) + req.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + a.HandleUpdateUserRole(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d. Body: %s", rr.Code, rr.Body.String()) + } +} + +func TestHandleCreateUser_SheriffInvite(t *testing.T) { + a, db := setupTestAdmin(t) + defer db.Close() + + payload := map[string]string{ + "email": "magistrate@ranch.com", + "full_name": "Mighty Magistrate", + "password": "TempPassword123!", + "global_role": "Magistrate", + } + body, _ := json.Marshal(payload) + req := httptest.NewRequest(http.MethodPost, "/api/admin/users", bytes.NewBuffer(body)) + + req.AddCookie(GetVIPCookie(a.Store)) + req.Header.Set("Content-Type", "application/json") + rr := httptest.NewRecorder() + a.HandleCreateUser(rr, req) + if rr.Code != http.StatusCreated { + t.Fatalf("Expected 201 Created, got %d. 
Body: %s", rr.Code, rr.Body.String()) + } + + var count int + db.QueryRow(`SELECT COUNT(*) FROM users WHERE email = 'magistrate@ranch.com'`).Scan(&count) + if count != 1 { + t.Errorf("Expected user to be created in the database") + } +} + +func TestHandleGetUsers(t *testing.T) { + a, db := setupTestAdmin(t) + defer db.Close() + + req := httptest.NewRequest(http.MethodGet, "/api/admin/users", nil) + + req.AddCookie(GetVIPCookie(a.Store)) + + rr := httptest.NewRecorder() + a.HandleGetUsers(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d. Body: %s", rr.Code, rr.Body.String()) + } +} diff --git a/pkg/admin/export_test.go b/pkg/admin/export_test.go new file mode 100644 index 0000000..c4b38ad --- /dev/null +++ b/pkg/admin/export_test.go @@ -0,0 +1,44 @@ +package admin + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func TestExportSystemState(t *testing.T) { + app, db := setupTestAdmin(t) + defer db.Close() + _, err := db.Exec(` + INSERT INTO tickets (title, severity, status, dedupe_hash) + VALUES ('Export Test Vuln', 'High', 'Triaged', 'test_hash_123') + `) + if err != nil { + t.Fatalf("Failed to insert test ticket: %v", err) + } + req := httptest.NewRequest(http.MethodGet, "/api/admin/export", nil) + req.AddCookie(GetVIPCookie(app.Store)) + rr := httptest.NewRecorder() + + app.HandleExportState(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d", rr.Code) + } + + if rr.Header().Get("Content-Disposition") != "attachment; filename=RiskRancher_export.json" { + t.Errorf("Missing or incorrect Content-Disposition header") + } + + var state domain.ExportState + if err := json.NewDecoder(rr.Body).Decode(&state); err != nil { + t.Fatalf("Failed to decode exported JSON: %v", err) + } + + if len(state.Tickets) == 0 || state.Tickets[0].Title != "Export Test Vuln" { + t.Errorf("Export did not contain the expected ticket data") + } +} diff 
--git a/pkg/admin/handler.go b/pkg/admin/handler.go new file mode 100644 index 0000000..d31e6bd --- /dev/null +++ b/pkg/admin/handler.go @@ -0,0 +1,15 @@ +package admin + +import ( + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +// Handler encapsulates all Admin and Sheriff HTTP logic +type Handler struct { + Store domain.Store +} + +// NewHandler creates a new Admin Handler +func NewHandler(store domain.Store) *Handler { + return &Handler{Store: store} +} diff --git a/pkg/admin/helpers_test.go b/pkg/admin/helpers_test.go new file mode 100644 index 0000000..4863ce8 --- /dev/null +++ b/pkg/admin/helpers_test.go @@ -0,0 +1,30 @@ +package admin + +import ( + "context" + "database/sql" + "net/http" + "testing" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/datastore" + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +// setupTestAdmin returns the clean Admin Handler and the raw DB +func setupTestAdmin(t *testing.T) (*Handler, *sql.DB) { + db := datastore.InitDB(":memory:") + store := datastore.NewSQLiteStore(db) + return NewHandler(store), db +} + +// GetVIPCookie creates a dummy Sheriff user to bypass the Bouncer in tests +func GetVIPCookie(store domain.Store) *http.Cookie { + user, err := store.GetUserByEmail(context.Background(), "vip_test@RiskRancher.com") + if err != nil { + user, _ = store.CreateUser(context.Background(), "vip_test@RiskRancher.com", "Test VIP", "hash", "Sheriff") + } + token := "vip_test_token_999" + store.CreateSession(context.Background(), token, user.ID, time.Now().Add(1*time.Hour)) + return &http.Cookie{Name: "session_token", Value: token} +} diff --git a/pkg/admin/updates_test.go b/pkg/admin/updates_test.go new file mode 100644 index 0000000..2e2e425 --- /dev/null +++ b/pkg/admin/updates_test.go @@ -0,0 +1,36 @@ +package admin + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" +) + +func TestCheckUpdates_OfflineFallback(t *testing.T) { + + app, db := setupTestAdmin(t) + defer db.Close() + + req 
:= httptest.NewRequest(http.MethodGet, "/api/admin/check-updates", nil) + req.AddCookie(GetVIPCookie(app.Store)) + rr := httptest.NewRecorder() + + app.HandleCheckUpdates(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d", rr.Code) + } + + var response map[string]interface{} + if err := json.NewDecoder(rr.Body).Decode(&response); err != nil { + t.Fatalf("Failed to decode response: %v", err) + } + + if _, exists := response["status"]; !exists { + t.Errorf("Expected 'status' field in response") + } + if _, exists := response["message"]; !exists { + t.Errorf("Expected 'message' field in response") + } +} diff --git a/pkg/analytics/analytics.go b/pkg/analytics/analytics.go new file mode 100644 index 0000000..014a897 --- /dev/null +++ b/pkg/analytics/analytics.go @@ -0,0 +1,17 @@ +package analytics + +import ( + "encoding/json" + "net/http" +) + +func (h *Handler) HandleGetAnalyticsSummary(w http.ResponseWriter, r *http.Request) { + summary, err := h.Store.GetAnalyticsSummary(r.Context()) + if err != nil { + http.Error(w, "Failed to generate analytics", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(summary) +} diff --git a/pkg/analytics/analytics_test.go b/pkg/analytics/analytics_test.go new file mode 100644 index 0000000..ec2b89f --- /dev/null +++ b/pkg/analytics/analytics_test.go @@ -0,0 +1,60 @@ +package analytics + +import ( + "context" + "database/sql" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/datastore" + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func setupTestAnalytics(t *testing.T) (*Handler, *sql.DB) { + db := datastore.InitDB(":memory:") + store := datastore.NewSQLiteStore(db) + return NewHandler(store), db +} + +func GetVIPCookie(store domain.Store) *http.Cookie { + user, _ := store.CreateUser(context.Background(), "vip@RiskRancher.com", "Test VIP", "hash", 
"Sheriff") + store.CreateSession(context.Background(), "vip_token_999", user.ID, time.Now().Add(1*time.Hour)) + return &http.Cookie{Name: "session_token", Value: "vip_token_999"} +} + +func TestAnalyticsSummary(t *testing.T) { + h, db := setupTestAnalytics(t) + defer db.Close() + + _, err := db.Exec(`INSERT INTO tickets (source, title, severity, status, dedupe_hash) VALUES + ('Trivy', 'Container CVE', 'Critical', 'Waiting to be Triaged', 'hash1'), + ('Trivy', 'Old Lib', 'High', 'Waiting to be Triaged', 'hash2'), + ('Trivy', 'Patched Lib', 'Critical', 'Patched', 'hash3'), + ('Manual Pentest', 'SQLi', 'Critical', 'Waiting to be Triaged', 'hash4') + `) + if err != nil { + t.Fatalf("Failed to insert dummy data: %v", err) + } + + req := httptest.NewRequest(http.MethodGet, "/api/analytics/summary", nil) + req.AddCookie(GetVIPCookie(h.Store)) + rr := httptest.NewRecorder() + + h.HandleGetAnalyticsSummary(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d. Body: %s", rr.Code, rr.Body.String()) + } + + var summary map[string]int + if err := json.NewDecoder(rr.Body).Decode(&summary); err != nil { + t.Fatalf("Failed to decode JSON: %v", err) + } + + if summary["Total_Open"] != 3 { + t.Errorf("Expected 3 total open tickets, got %d", summary["Total_Open"]) + } +} diff --git a/pkg/analytics/handler.go b/pkg/analytics/handler.go new file mode 100644 index 0000000..e4e1e17 --- /dev/null +++ b/pkg/analytics/handler.go @@ -0,0 +1,13 @@ +package analytics + +import ( + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +type Handler struct { + Store domain.Store +} + +func NewHandler(store domain.Store) *Handler { + return &Handler{Store: store} +} diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go new file mode 100644 index 0000000..411f1fc --- /dev/null +++ b/pkg/auth/auth.go @@ -0,0 +1,41 @@ +package auth + +import ( + "crypto/rand" + "encoding/base64" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" + "golang.org/x/crypto/bcrypt" +) + +// Handler encapsulates all Identity and Access HTTP logic +type Handler struct { + Store domain.Store +} + +// NewHandler creates a new Auth Handler +func NewHandler(store domain.Store) *Handler { + return &Handler{Store: store} +} + +// HashPassword takes a plaintext password, automatically generates a secure salt +func HashPassword(password string) (string, error) { + bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) + return string(bytes), err +} + +// CheckPasswordHash securely compares a plaintext password with a stored bcrypt hash. +func CheckPasswordHash(password, hash string) bool { + err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) + return err == nil +} + +// GenerateSessionToken creates a cryptographically secure random string. +// Uses crypto/rand (not math/rand) so session tokens are unpredictable; +// crypto/rand.Read is a drop-in replacement with the same signature. +func GenerateSessionToken() (string, error) { + b := make([]byte, 32) + _, err := rand.Read(b) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(b), nil +} diff --git a/pkg/auth/auth_handlers.go b/pkg/auth/auth_handlers.go new file mode 100644 index 0000000..2d7dd92 --- /dev/null +++ b/pkg/auth/auth_handlers.go @@ -0,0 +1,140 @@ +package auth + +import ( + "encoding/json" + "net/http" + "strings" + "time" +) + +const SessionCookieName = "session_token" + +// RegisterRequest represents the JSON payload expected for user registration. +type RegisterRequest struct { + Email string `json:"email"` + FullName string `json:"full_name"` + Password string `json:"password"` + GlobalRole string `json:"global_role"` +} + +// LoginRequest represents the JSON payload expected for user login. +type LoginRequest struct { + Email string `json:"email"` + Password string `json:"password"` +} + +// HandleRegister processes new user signups. 
+func (h *Handler) HandleRegister(w http.ResponseWriter, r *http.Request) { + var req RegisterRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + count, err := h.Store.GetUserCount(r.Context()) + if err != nil { + http.Error(w, "Internal server error", http.StatusInternalServerError) + return + } + + if count > 0 { + http.Error(w, "Forbidden: System already initialized. Contact your Sheriff for an account.", http.StatusForbidden) + return + } + + req.GlobalRole = "Sheriff" + + if req.Email == "" || req.Password == "" || req.FullName == "" { + http.Error(w, "Missing required fields", http.StatusBadRequest) + return + } + + hashedPassword, err := HashPassword(req.Password) + if err != nil { + http.Error(w, "Failed to hash password", http.StatusInternalServerError) + return + } + + user, err := h.Store.CreateUser(r.Context(), req.Email, req.FullName, hashedPassword, req.GlobalRole) + if err != nil { + if strings.Contains(err.Error(), "UNIQUE constraint failed") { + http.Error(w, "Email already exists", http.StatusConflict) + return + } + http.Error(w, "Failed to create user", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(user) +} + +// HandleLogin authenticates a user and issues a session cookie. 
+func (h *Handler) HandleLogin(w http.ResponseWriter, r *http.Request) { + var req LoginRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON payload", http.StatusBadRequest) + return + } + + user, err := h.Store.GetUserByEmail(r.Context(), req.Email) + if err != nil { + http.Error(w, "Invalid credentials", http.StatusUnauthorized) + return + } + + if !CheckPasswordHash(req.Password, user.PasswordHash) { + http.Error(w, "Invalid credentials", http.StatusUnauthorized) + return + } + + token, err := GenerateSessionToken() + if err != nil { + http.Error(w, "Failed to generate session", http.StatusInternalServerError) + return + } + + expiresAt := time.Now().Add(24 * time.Hour) + if err := h.Store.CreateSession(r.Context(), token, user.ID, expiresAt); err != nil { + http.Error(w, "Failed to persist session", http.StatusInternalServerError) + return + } + + // Use the shared constant so login, logout, and the auth middleware + // always agree on the cookie name. + http.SetCookie(w, &http.Cookie{ + Name: SessionCookieName, + Value: token, + Expires: expiresAt, + Path: "/", + HttpOnly: true, + Secure: false, // Set to TRUE in production for HTTPS! + SameSite: http.SameSiteLaxMode, + }) + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(user) +} + +// HandleLogout destroys the user's session in the database and clears their cookie. 
+func (h *Handler) HandleLogout(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie(SessionCookieName) + + if err == nil && cookie.Value != "" { + _ = h.Store.DeleteSession(r.Context(), cookie.Value) + } + + http.SetCookie(w, &http.Cookie{ + Name: SessionCookieName, + Value: "", + Path: "/", + Expires: time.Unix(0, 0), + MaxAge: -1, + HttpOnly: true, + Secure: true, // Ensures it's only sent over HTTPS + SameSite: http.SameSiteStrictMode, + }) + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]string{ + "message": "Successfully logged out", + }) +} diff --git a/pkg/auth/auth_handlers_test.go b/pkg/auth/auth_handlers_test.go new file mode 100644 index 0000000..c274f0a --- /dev/null +++ b/pkg/auth/auth_handlers_test.go @@ -0,0 +1,111 @@ +package auth + +import ( + "bytes" + "database/sql" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "epigas.gitea.cloud/RiskRancher/core/pkg/datastore" +) + +func setupTestAuth(t *testing.T) (*Handler, *sql.DB) { + db := datastore.InitDB(":memory:") + + store := datastore.NewSQLiteStore(db) + + h := NewHandler(store) + + return h, db +} + +func TestAuthHandlers(t *testing.T) { + a, db := setupTestAuth(t) + defer db.Close() + + t.Run("Successful Registration", func(t *testing.T) { + payload := map[string]string{ + "email": "admin@RiskRancher.com", + "full_name": "Doc Holliday", + "password": "SuperSecretPassword123!", + "global_role": "Sheriff", // Use a valid role! 
+ } + body, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPost, "/api/auth/register", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + rr := httptest.NewRecorder() + + a.HandleRegister(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("Expected 201 Created for registration, got %d", rr.Code) + } + }) + + t.Run("Successful Login Issues Cookie", func(t *testing.T) { + payload := map[string]string{ + "email": "admin@RiskRancher.com", + "password": "SuperSecretPassword123!", + } + body, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPost, "/api/auth/login", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + rr := httptest.NewRecorder() + + a.HandleLogin(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK for successful login, got %d", rr.Code) + } + + cookies := rr.Result().Cookies() + if len(cookies) == 0 { + t.Fatalf("Expected a session cookie to be set, but none was found") + } + if cookies[0].Name != "session_token" { + t.Errorf("Expected cookie named 'session_token', got '%s'", cookies[0].Name) + } + }) + + t.Run("Failed Login Rejects Access", func(t *testing.T) { + payload := map[string]string{ + "email": "admin@RiskRancher.com", + "password": "WrongPassword!", + } + body, _ := json.Marshal(payload) + + req := httptest.NewRequest(http.MethodPost, "/api/auth/login", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + rr := httptest.NewRecorder() + + a.HandleLogin(rr, req) + + if rr.Code != http.StatusUnauthorized { + t.Fatalf("Expected 401 Unauthorized for wrong password, got %d", rr.Code) + } + }) +} + +func TestHandleLogout(t *testing.T) { + a, db := setupTestAuth(t) + defer db.Close() + + req := httptest.NewRequest(http.MethodPost, "/api/auth/logout", nil) + + cookie := &http.Cookie{ + Name: SessionCookieName, + Value: "fake-session-token-123", + } + req.AddCookie(cookie) + + rr := httptest.NewRecorder() + 
a.HandleLogout(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("Expected 200 OK, got %d. Body: %s", rr.Code, rr.Body.String()) + } +} diff --git a/pkg/auth/auth_test.go b/pkg/auth/auth_test.go new file mode 100644 index 0000000..5892a16 --- /dev/null +++ b/pkg/auth/auth_test.go @@ -0,0 +1,49 @@ +package auth + +import ( + "testing" +) + +func TestPasswordHashing(t *testing.T) { + password := "SuperSecretSOCPassword123!" + + hash, err := HashPassword(password) + if err != nil { + t.Fatalf("Failed to hash password: %v", err) + } + + if hash == password { + t.Fatalf("Security failure: Hash matches plain text!") + } + if len(hash) == 0 { + t.Fatalf("Hash is empty") + } + + isValid := CheckPasswordHash(password, hash) + if !isValid { + t.Errorf("Expected valid password to match hash, but it failed") + } + + isInvalid := CheckPasswordHash("WrongPassword!", hash) + if isInvalid { + t.Errorf("Security failure: Incorrect password returned true!") + } +} + +func TestGenerateSessionToken(t *testing.T) { + + token1, err1 := GenerateSessionToken() + token2, err2 := GenerateSessionToken() + + if err1 != nil || err2 != nil { + t.Fatalf("Failed to generate session tokens") + } + + if len(token1) < 32 { + t.Errorf("Token is too short for security standards: %d chars", len(token1)) + } + + if token1 == token2 { + t.Errorf("CRITICAL: RNG generated the exact same token twice: %s", token1) + } +} diff --git a/pkg/auth/middleware.go b/pkg/auth/middleware.go new file mode 100644 index 0000000..762acc8 --- /dev/null +++ b/pkg/auth/middleware.go @@ -0,0 +1,56 @@ +package auth + +import ( + "context" + "net/http" + "time" +) + +type contextKey string + +const UserIDKey contextKey = "user_id" + +func (h *Handler) RequireAuth(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie("session_token") + if err != nil { + http.Error(w, "Unauthorized: Missing session cookie", http.StatusUnauthorized) + return + } + + 
session, err := h.Store.GetSession(r.Context(), cookie.Value) + if err != nil { + http.Error(w, "Unauthorized: Invalid session", http.StatusUnauthorized) + return + } + + if session.ExpiresAt.Before(time.Now()) { + http.Error(w, "Unauthorized: Session expired", http.StatusUnauthorized) + return + } + + ctx := context.WithValue(r.Context(), UserIDKey, session.UserID) + + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +// RequireUIAuth checks for a valid session and redirects to /login if it fails, +func (h *Handler) RequireUIAuth(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie("session_token") + if err != nil { + http.Redirect(w, r, "/login", http.StatusSeeOther) + return + } + + session, err := h.Store.GetSession(r.Context(), cookie.Value) + if err != nil || session.ExpiresAt.Before(time.Now()) { + http.Redirect(w, r, "/login", http.StatusSeeOther) + return + } + + ctx := context.WithValue(r.Context(), UserIDKey, session.UserID) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} diff --git a/pkg/auth/middleware_test.go b/pkg/auth/middleware_test.go new file mode 100644 index 0000000..029d2b2 --- /dev/null +++ b/pkg/auth/middleware_test.go @@ -0,0 +1,61 @@ +package auth + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func TestRequireAuthMiddleware(t *testing.T) { + h, db := setupTestAuth(t) + defer db.Close() + + user, err := h.Store.CreateUser(context.Background(), "vip@RiskRancher.com", "Wyatt Earp", "fake_hash", "Sheriff") + if err != nil { + t.Fatalf("Failed to seed test user: %v", err) + } + + validToken := "valid_test_token_123" + expiresAt := time.Now().Add(1 * time.Hour) + err = h.Store.CreateSession(context.Background(), validToken, user.ID, expiresAt) + if err != nil { + t.Fatalf("Failed to seed test session: %v", err) + } + + dummyHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + 
w.Write([]byte("Welcome to the VIP room")) + }) + protectedHandler := h.RequireAuth(dummyHandler) + + tests := []struct { + name string + cookieName string + cookieValue string + expectedStatus int + }{ + {"Missing Cookie", "", "", http.StatusUnauthorized}, + {"Wrong Cookie Name", "wrong_name", validToken, http.StatusUnauthorized}, + {"Invalid Token", "session_token", "fake_invalid_token", http.StatusUnauthorized}, + {"Valid Token", "session_token", validToken, http.StatusOK}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/", nil) + + if tt.cookieName != "" { + req.AddCookie(&http.Cookie{Name: tt.cookieName, Value: tt.cookieValue}) + } + + rr := httptest.NewRecorder() + protectedHandler.ServeHTTP(rr, req) + + if rr.Code != tt.expectedStatus { + t.Errorf("Expected status %d, got %d", tt.expectedStatus, rr.Code) + } + }) + } +} diff --git a/pkg/auth/rbac_middleware.go b/pkg/auth/rbac_middleware.go new file mode 100644 index 0000000..9e335ee --- /dev/null +++ b/pkg/auth/rbac_middleware.go @@ -0,0 +1,74 @@ +package auth + +import ( + "net/http" +) + +// RequireRole acts as the checker +func (h *Handler) RequireRole(requiredRole string) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + userIDVal := r.Context().Value(UserIDKey) + if userIDVal == nil { + http.Error(w, "Unauthorized: No user context", http.StatusUnauthorized) + return + } + + userID, ok := userIDVal.(int) + if !ok { + http.Error(w, "Internal Server Error: Invalid user context", http.StatusInternalServerError) + return + } + + user, err := h.Store.GetUserByID(r.Context(), userID) + if err != nil { + http.Error(w, "Forbidden: User not found", http.StatusForbidden) + return + } + + if user.GlobalRole != requiredRole { + http.Error(w, "Forbidden: Insufficient permissions", http.StatusForbidden) + return + } + + next.ServeHTTP(w, r) 
+ }) + } +} + +// RequireAnyRole allows access if the user has ANY of the provided roles. +func (h *Handler) RequireAnyRole(allowedRoles ...string) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + userIDVal := r.Context().Value(UserIDKey) + if userIDVal == nil { + http.Error(w, "Unauthorized: No user context", http.StatusUnauthorized) + return + } + + userID, ok := userIDVal.(int) + if !ok { + http.Error(w, "Internal Server Error: Invalid user context", http.StatusInternalServerError) + return + } + + user, err := h.Store.GetUserByID(r.Context(), userID) + if err != nil { + http.Error(w, "Forbidden: User not found", http.StatusForbidden) + return + } + + for _, role := range allowedRoles { + if user.GlobalRole == role { + // Match found! Open the door. + next.ServeHTTP(w, r) + return + } + } + + http.Error(w, "Forbidden: Insufficient permissions", http.StatusForbidden) + }) + } +} diff --git a/pkg/auth/rbac_middleware_test.go b/pkg/auth/rbac_middleware_test.go new file mode 100644 index 0000000..3d28eff --- /dev/null +++ b/pkg/auth/rbac_middleware_test.go @@ -0,0 +1,49 @@ +package auth + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" +) + +func TestRequireRoleMiddleware(t *testing.T) { + a, db := setupTestAuth(t) + defer db.Close() + + sheriff, _ := a.Store.CreateUser(context.Background(), "sheriff@ranch.com", "Wyatt Earp", "hash", "Sheriff") + rangeHand, _ := a.Store.CreateUser(context.Background(), "hand@ranch.com", "Jesse James", "hash", "RangeHand") + + vipHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte("Welcome to the Manager's Office")) + }) + + protectedHandler := a.RequireRole("Sheriff")(vipHandler) + + tests := []struct { + name string + userID int + expectedStatus int + }{ + {"Valid Sheriff Access", sheriff.ID, http.StatusOK}, + {"Denied RangeHand 
Access", rangeHand.ID, http.StatusForbidden}, + {"Unknown User", 9999, http.StatusForbidden}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/admin/passwords", nil) + + ctx := context.WithValue(req.Context(), UserIDKey, tt.userID) + req = req.WithContext(ctx) + + rr := httptest.NewRecorder() + protectedHandler.ServeHTTP(rr, req) + + if rr.Code != tt.expectedStatus { + t.Errorf("Expected status %d, got %d", tt.expectedStatus, rr.Code) + } + }) + } +} diff --git a/pkg/datastore/auth_db.go b/pkg/datastore/auth_db.go new file mode 100644 index 0000000..0015079 --- /dev/null +++ b/pkg/datastore/auth_db.go @@ -0,0 +1,187 @@ +package datastore + +import ( + "context" + "database/sql" + "errors" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +// ErrNotFound is a standard error we can use across our handlers +var ErrNotFound = errors.New("record not found") + +func (s *SQLiteStore) CreateUser(ctx context.Context, email, fullName, passwordHash, globalRole string) (*domain.User, error) { + query := `INSERT INTO users (email, full_name, password_hash, global_role) VALUES (?, ?, ?, ?)` + + result, err := s.DB.ExecContext(ctx, query, email, fullName, passwordHash, globalRole) + if err != nil { + return nil, err + } + + id, err := result.LastInsertId() + if err != nil { + return nil, err + } + + return &domain.User{ + ID: int(id), + Email: email, + FullName: fullName, + PasswordHash: passwordHash, + GlobalRole: globalRole, + }, nil +} + +func (s *SQLiteStore) GetUserByEmail(ctx context.Context, email string) (*domain.User, error) { + var user domain.User + query := "SELECT id, email, password_hash, full_name, global_role FROM users WHERE email = ? 
AND is_active = 1" + + err := s.DB.QueryRowContext(ctx, query, email).Scan( + &user.ID, + &user.Email, + &user.PasswordHash, + &user.FullName, + &user.GlobalRole, + ) + + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, sql.ErrNoRows // Bouncer says no (either wrong email, or deactivated) + } + return nil, err + } + + return &user, nil +} + +func (s *SQLiteStore) CreateSession(ctx context.Context, token string, userID int, expiresAt time.Time) error { + query := `INSERT INTO sessions (session_token, user_id, expires_at) VALUES (?, ?, ?)` + _, err := s.DB.ExecContext(ctx, query, token, userID, expiresAt) + return err +} + +func (s *SQLiteStore) GetSession(ctx context.Context, token string) (*domain.Session, error) { + query := `SELECT session_token, user_id, expires_at FROM sessions WHERE session_token = ?` + + var session domain.Session + err := s.DB.QueryRowContext(ctx, query, token).Scan( + &session.Token, + &session.UserID, + &session.ExpiresAt, + ) + + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrNotFound + } + return nil, err + } + + return &session, nil +} + +// GetUserByID fetches a user's full record, including their role +func (s *SQLiteStore) GetUserByID(ctx context.Context, id int) (*domain.User, error) { + query := `SELECT id, email, full_name, password_hash, global_role FROM users WHERE id = ?` + + var user domain.User + err := s.DB.QueryRowContext(ctx, query, id).Scan( + &user.ID, + &user.Email, + &user.FullName, + &user.PasswordHash, + &user.GlobalRole, + ) + + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrNotFound + } + return nil, err + } + + return &user, nil +} + +// UpdateUserPassword allows an administrator to overwrite a forgotten password +func (s *SQLiteStore) UpdateUserPassword(ctx context.Context, id int, newPasswordHash string) error { + query := `UPDATE users SET password_hash = ? 
WHERE id = ?` + + _, err := s.DB.ExecContext(ctx, query, newPasswordHash, id) + return err +} + +// UpdateUserRole promotes or demotes a user by updating their global_role. +func (s *SQLiteStore) UpdateUserRole(ctx context.Context, id int, newRole string) error { + query := `UPDATE users SET global_role = ? WHERE id = ?` + + _, err := s.DB.ExecContext(ctx, query, newRole, id) + return err +} + +// DeactivateUserAndReassign securely offboards a user, kicks them out +func (s *SQLiteStore) DeactivateUserAndReassign(ctx context.Context, userID int) error { + var email string + if err := s.DB.QueryRowContext(ctx, "SELECT email FROM users WHERE id = ?", userID).Scan(&email); err != nil { + return err + } + + tx, err := s.DB.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + _, err = tx.ExecContext(ctx, `UPDATE users SET is_active = 0 WHERE id = ?`, userID) + if err != nil { + return err + } + + _, err = tx.ExecContext(ctx, `DELETE FROM ticket_assignments WHERE assignee = ?`, email) + if err != nil { + return err + } + + _, err = tx.ExecContext(ctx, `DELETE FROM sessions WHERE user_id = ?`, userID) + if err != nil { + return err + } + + return tx.Commit() +} + +// GetUserCount returns the total number of registered users in the system. 
+func (s *SQLiteStore) GetUserCount(ctx context.Context) (int, error) { + var count int + err := s.DB.QueryRowContext(ctx, `SELECT COUNT(*) FROM users`).Scan(&count) + if err != nil { + return 0, err + } + return count, nil +} + +func (s *SQLiteStore) GetAllUsers(ctx context.Context) ([]*domain.User, error) { + // Notice the return type is now []*domain.User + rows, err := s.DB.QueryContext(ctx, "SELECT id, email, full_name, global_role FROM users WHERE is_active = 1") + if err != nil { + return nil, err + } + defer rows.Close() + + var users []*domain.User + for rows.Next() { + var u domain.User + if err := rows.Scan(&u.ID, &u.Email, &u.FullName, &u.GlobalRole); err == nil { + users = append(users, &u) // ๐ Appending the memory address! + } + } + return users, nil +} + +// DeleteSession removes the token from the database so it can never be used again. +func (s *SQLiteStore) DeleteSession(ctx context.Context, token string) error { + _, err := s.DB.ExecContext(ctx, `DELETE FROM sessions WHERE token = ?`, token) + return err +} diff --git a/pkg/datastore/auth_db_test.go b/pkg/datastore/auth_db_test.go new file mode 100644 index 0000000..462f3b1 --- /dev/null +++ b/pkg/datastore/auth_db_test.go @@ -0,0 +1,73 @@ +package datastore + +import ( + "context" + "testing" + "time" +) + +func TestUserAndSessionLifecycle(t *testing.T) { + store := setupTestDB(t) + defer store.DB.Close() + + _, err := store.DB.Exec(` + CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, email TEXT UNIQUE, full_name TEXT, password_hash TEXT, global_role TEXT, is_active BOOLEAN DEFAULT 1); + CREATE TABLE sessions (session_token TEXT PRIMARY KEY, user_id INTEGER, expires_at DATETIME); + `) + + ctx := context.Background() + + user, err := store.CreateUser(ctx, "admin@RiskRancher.com", "doc", "fake_bcrypt_hash", "Admin") + if err != nil { + t.Fatalf("Failed to create user: %v", err) + } + if user.ID == 0 { + t.Errorf("Expected database to return a valid auto-incremented ID, got 0") + } + + _, 
err = store.CreateUser(ctx, "admin@RiskRancher.com", "doc", "another_hash", "Analyst") + if err == nil { + t.Fatalf("Security Failure: Database allowed a duplicate email address!") + } + + fetchedUser, err := store.GetUserByEmail(ctx, "admin@RiskRancher.com") + if err != nil { + t.Fatalf("Failed to fetch user by email: %v", err) + } + if fetchedUser.GlobalRole != "Admin" { + t.Errorf("Expected role 'Admin', got '%s'", fetchedUser.GlobalRole) + } + + expires := time.Now().Add(24 * time.Hour) + err = store.CreateSession(ctx, "fake_secure_token", user.ID, expires) + if err != nil { + t.Fatalf("Failed to create session: %v", err) + } + + session, err := store.GetSession(ctx, "fake_secure_token") + if err != nil { + t.Fatalf("Failed to retrieve session: %v", err) + } + if session.UserID != user.ID { + t.Errorf("Session mapped to wrong user! Expected %d, got %d", user.ID, session.UserID) + } + + userByID, err := store.GetUserByID(ctx, user.ID) + if err != nil { + t.Fatalf("Failed to fetch user by ID: %v", err) + } + if userByID.Email != user.Email { + t.Errorf("GetUserByID returned wrong user. 
// runChaosEngine fires 100 concurrent workers (1 bulk inserter, 20 updaters,
// 79 readers) at the provided database connection and returns how many
// individual operations failed. A correctly tuned SQLite connection
// (WAL + busy timeout) is expected to return 0.
func runChaosEngine(db *sql.DB) int {
	db.Exec(`CREATE TABLE IF NOT EXISTS tickets (id INTEGER PRIMARY KEY AUTOINCREMENT, title TEXT, status TEXT)`)
	db.Exec(`INSERT INTO tickets (title, status) VALUES ('Seed', 'Open')`)

	var wg sync.WaitGroup
	// BUG FIX: capacity was 1000, but the workers can emit up to ~4,370
	// errors (20 commits + 400 updates + 3,950 reads). On a badly
	// misbehaving engine the channel filled, senders blocked, and
	// wg.Wait() deadlocked the test. Size it past the worst case.
	errCh := make(chan error, 5000)

	// One writer: 20 transactions of 50 inserts each.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 20; i++ {
			// BUG FIX: Begin's error was discarded; a failed Begin left tx
			// nil and the loop would panic instead of counting the error.
			tx, err := db.Begin()
			if err != nil {
				errCh <- err
				continue
			}
			for j := 0; j < 50; j++ {
				tx.Exec(`INSERT INTO tickets (title, status) VALUES ('Vuln', 'Open')`)
			}
			if err := tx.Commit(); err != nil {
				errCh <- err
			}
		}
	}()

	// 20 updaters hammering the same row to force lock contention.
	for w := 0; w < 20; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 20; i++ {
				if _, err := db.Exec(`UPDATE tickets SET status = 'Patched' WHERE id = 1`); err != nil {
					errCh <- err
				}
			}
		}()
	}

	// 79 readers issuing COUNT(*) queries concurrently with the writers.
	for r := 0; r < 79; r++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 50; i++ {
				rows, err := db.Query(`SELECT COUNT(*) FROM tickets`)
				if err != nil {
					errCh <- err
				} else {
					rows.Close()
				}
			}
		}()
	}

	wg.Wait()
	close(errCh)

	errorCount := 0
	for range errCh {
		errorCount++
	}
	return errorCount
}
tempDir := t.TempDir() + dbPath := filepath.Join(tempDir, "tuned.db") + + dsn := fmt.Sprintf("%s?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000", dbPath) + db, err := sql.Open("sqlite3", dsn) + if err != nil { + t.Fatalf("Failed to open tuned DB: %v", err) + } + defer db.Close() + + db.SetMaxOpenConns(25) + db.SetMaxIdleConns(25) + + errors := runChaosEngine(db) + + if errors > 0 { + t.Fatalf("FAILED! Tuned engine threw %d errors. It should have queued them perfectly.", errors) + } + t.Log("SUCCESS: 100 concurrent workers survived SQLite chaos with ZERO locked errors.") +} diff --git a/pkg/datastore/db.go b/pkg/datastore/db.go new file mode 100644 index 0000000..3aa9c3f --- /dev/null +++ b/pkg/datastore/db.go @@ -0,0 +1,94 @@ +package datastore + +import ( + "database/sql" + "embed" + _ "embed" + "encoding/json" + "log" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" + _ "github.com/mattn/go-sqlite3" +) + +//go:embed schema.sql +var schemaSQL string + +//go:embed defaults/*.json +var defaultAdaptersFS embed.FS + +func InitDB(filepath string) *sql.DB { + dsn := "file:" + filepath + "?_journal=WAL&_timeout=5000&_sync=1&_fk=1" + + db, err := sql.Open("sqlite3", dsn) + if err != nil { + log.Fatalf("Failed to open database: %v", err) + } + + db.SetMaxOpenConns(25) + db.SetMaxIdleConns(25) + db.SetConnMaxLifetime(5 * time.Minute) + + migrations := []string{ + schemaSQL, + } + + if err := RunMigrations(db, migrations); err != nil { + log.Fatalf("Database upgrade failed! 
Halting boot to protect data: %v", err) + } + + SeedAdapters(db) + + return db +} + +// SeedAdapters reads the embedded JSON files and UPSERTs them into SQLite +func SeedAdapters(db *sql.DB) { + files, err := defaultAdaptersFS.ReadDir("defaults") + if err != nil { + log.Printf("No default adapters found or failed to read: %v", err) + return + } + + for _, file := range files { + data, err := defaultAdaptersFS.ReadFile("defaults/" + file.Name()) + if err != nil { + log.Printf("Failed to read adapter file %s: %v", file.Name(), err) + continue + } + + var adapter domain.Adapter + if err := json.Unmarshal(data, &adapter); err != nil { + log.Printf("Failed to parse adapter JSON %s: %v", file.Name(), err) + continue + } + + query := ` + INSERT INTO data_adapters ( + name, source_name, findings_path, mapping_title, + mapping_asset, mapping_severity, mapping_description, mapping_remediation + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?) + ON CONFLICT(name) DO UPDATE SET + source_name = excluded.source_name, + findings_path = excluded.findings_path, + mapping_title = excluded.mapping_title, + mapping_asset = excluded.mapping_asset, + mapping_severity = excluded.mapping_severity, + mapping_description = excluded.mapping_description, + mapping_remediation = excluded.mapping_remediation, + updated_at = CURRENT_TIMESTAMP; + ` + + _, err = db.Exec(query, + adapter.Name, adapter.SourceName, adapter.FindingsPath, adapter.MappingTitle, + adapter.MappingAsset, adapter.MappingSeverity, adapter.MappingDescription, adapter.MappingRemediation, + ) + + if err != nil { + log.Printf("Failed to seed adapter %s to DB: %v", adapter.Name, err) + } else { + log.Printf("๐ Successfully loaded adapter: %s", adapter.Name) + } + } +} diff --git a/pkg/datastore/defaults/trivy.json b/pkg/datastore/defaults/trivy.json new file mode 100644 index 0000000..7a0bdf5 --- /dev/null +++ b/pkg/datastore/defaults/trivy.json @@ -0,0 +1,10 @@ +{ + "name": "Trivy Container Scan", + "source_name": "Trivy", + "findings_path": 
"Results.0.Vulnerabilities", + "mapping_title": "VulnerabilityID", + "mapping_asset": "PkgName", + "mapping_severity": "Severity", + "mapping_description": "Title", + "mapping_remediation": "FixedVersion" +} \ No newline at end of file diff --git a/pkg/datastore/diff_test.go b/pkg/datastore/diff_test.go new file mode 100644 index 0000000..847c665 --- /dev/null +++ b/pkg/datastore/diff_test.go @@ -0,0 +1,84 @@ +package datastore + +import ( + "context" + "database/sql" + "testing" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" + _ "github.com/mattn/go-sqlite3" // We need the SQLite driver for the test +) + +func setupTestDB(t *testing.T) *SQLiteStore { + db, err := sql.Open("sqlite3", ":memory:") + if err != nil { + t.Fatalf("Failed to open in-memory SQLite database: %v", err) + } + + store := &SQLiteStore{DB: db} + return store +} + +func TestIngestionDiffEngine(t *testing.T) { + store := setupTestDB(t) + defer store.DB.Close() + _, err := store.DB.Exec(` + CREATE TABLE IF NOT EXISTS sla_policies (domain TEXT, severity TEXT, days_to_remediate INTEGER, max_extensions INTEGER, days_to_triage INTEGER); + CREATE TABLE IF NOT EXISTS routing_rules (id INTEGER, rule_type TEXT, match_value TEXT, assignee TEXT, role TEXT); + CREATE TABLE IF NOT EXISTS ticket_assignments (ticket_id INTEGER, assignee TEXT, role TEXT); + CREATE TABLE IF NOT EXISTS ticket_activity (ticket_id INTEGER, actor TEXT, activity_type TEXT, new_value TEXT); + + CREATE TABLE IF NOT EXISTS tickets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + source TEXT, + asset_identifier TEXT, + title TEXT, + severity TEXT, + description TEXT, + status TEXT, + dedupe_hash TEXT UNIQUE, + patched_at DATETIME, + domain TEXT, + triage_due_date DATETIME, + remediation_due_date DATETIME + )`) + + if err != nil { + t.Fatalf("Failed to create schema: %v", err) + } + + store.DB.Exec(`INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash) VALUES + ('Trivy', 'Server-A', 'Old Vuln', 
'High', 'Desc', 'Waiting to be Triaged', 'hash_1_open')`) + + store.DB.Exec(`INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash) VALUES + ('Trivy', 'Server-A', 'Old Vuln', 'High', 'Desc', 'Waiting to be Triaged', 'hash_1_open')`) + + store.DB.Exec(`INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash) VALUES + ('Trivy', 'Server-A', 'Regressed Vuln', 'High', 'Desc', 'Patched', 'hash_2_patched')`) + incomingPayload := []domain.Ticket{ + {Source: "Trivy", AssetIdentifier: "Server-A", Title: "Regressed Vuln", DedupeHash: "hash_2_patched"}, + {Source: "Trivy", AssetIdentifier: "Server-A", Title: "Brand New Vuln", DedupeHash: "hash_3_new"}, + } + + err = store.ProcessIngestionBatch(context.Background(), "Trivy", "Server-A", incomingPayload) + if err != nil { + t.Fatalf("Diff Engine failed: %v", err) + } + + var status string + + store.DB.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_1_open'`).Scan(&status) + if status != "Patched" { + t.Errorf("Expected hash_1_open to be Auto-Patched, got %s", status) + } + + store.DB.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_2_patched'`).Scan(&status) + if status != "Waiting to be Triaged" { + t.Errorf("Expected hash_2_patched to be Re-opened, got %s", status) + } + + store.DB.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_3_new'`).Scan(&status) + if status != "Waiting to be Triaged" { + t.Errorf("Expected hash_3_new to be newly created, got %s", status) + } +} diff --git a/pkg/datastore/migrate.go b/pkg/datastore/migrate.go new file mode 100644 index 0000000..3b68217 --- /dev/null +++ b/pkg/datastore/migrate.go @@ -0,0 +1,58 @@ +package datastore + +import ( + "database/sql" + "fmt" + "log" +) + +// RunMigrations ensures the database schema matches the binary version +func RunMigrations(db *sql.DB, migrations []string) error { + _, err := db.Exec(` + CREATE TABLE IF NOT EXISTS schema_migrations 
( + version INTEGER PRIMARY KEY, + applied_at DATETIME DEFAULT CURRENT_TIMESTAMP + ) + `) + if err != nil { + return fmt.Errorf("failed to create schema_migrations table: %v", err) + } + + var currentVersion int + err = db.QueryRow("SELECT IFNULL(MAX(version), 0) FROM schema_migrations").Scan(¤tVersion) + if err != nil && err != sql.ErrNoRows { + return fmt.Errorf("failed to read current schema version: %v", err) + } + + for i, query := range migrations { + migrationVersion := i + 1 + + if migrationVersion > currentVersion { + log.Printf("๐ Applying database migration v%d...", migrationVersion) + + // Start a transaction so if the ALTER TABLE fails, it rolls back cleanly + tx, err := db.Begin() + if err != nil { + return err + } + + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return fmt.Errorf("migration v%d failed: %v", migrationVersion, err) + } + + if _, err := tx.Exec("INSERT INTO schema_migrations (version) VALUES (?)", migrationVersion); err != nil { + tx.Rollback() + return fmt.Errorf("failed to record migration v%d: %v", migrationVersion, err) + } + + if err := tx.Commit(); err != nil { + return err + } + + log.Printf("โ Migration v%d applied successfully.", migrationVersion) + } + } + + return nil +} diff --git a/pkg/datastore/migrate_test.go b/pkg/datastore/migrate_test.go new file mode 100644 index 0000000..167ba31 --- /dev/null +++ b/pkg/datastore/migrate_test.go @@ -0,0 +1,42 @@ +package datastore + +import ( + "database/sql" + "testing" + + _ "github.com/mattn/go-sqlite3" +) + +func TestSchemaMigrations(t *testing.T) { + db, err := sql.Open("sqlite3", ":memory:") + if err != nil { + t.Fatalf("Failed to open test db: %v", err) + } + defer db.Close() + + migrations := []string{ + `CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT);`, + `ALTER TABLE users ADD COLUMN email TEXT;`, + } + + err = RunMigrations(db, migrations) + if err != nil { + t.Fatalf("Initial migration failed: %v", err) + } + + var version int + db.QueryRow("SELECT 
MAX(version) FROM schema_migrations").Scan(&version) + if version != 2 { + t.Errorf("Expected database to be at version 2, got %d", version) + } + + err = RunMigrations(db, migrations) + if err != nil { + t.Fatalf("Idempotent migration failed: %v", err) + } + + _, err = db.Exec("INSERT INTO users (name, email) VALUES ('Tim', 'tim@ranch.com')") + if err != nil { + t.Errorf("Migration 2 did not apply correctly! Column 'email' missing: %v", err) + } +} diff --git a/pkg/datastore/schema.sql b/pkg/datastore/schema.sql new file mode 100644 index 0000000..a39bc54 --- /dev/null +++ b/pkg/datastore/schema.sql @@ -0,0 +1,147 @@ +CREATE TABLE IF NOT EXISTS app_config ( + id INTEGER PRIMARY KEY CHECK (id = 1), + timezone TEXT DEFAULT 'America/New_York', + business_start INTEGER DEFAULT 9, + business_end INTEGER DEFAULT 17, + default_extension_days INTEGER DEFAULT 30, + backup_enabled BOOLEAN DEFAULT 1, + backup_interval_hours INTEGER DEFAULT 24, + backup_retention_days INTEGER DEFAULT 30 + ); + +INSERT OR IGNORE INTO app_config (id) VALUES (1); + +CREATE TABLE IF NOT EXISTS domains (name TEXT PRIMARY KEY); +INSERT OR IGNORE INTO domains (name) VALUES ('Vulnerability'), ('Privacy'), ('Compliance'), ('Incident'); + +CREATE TABLE IF NOT EXISTS departments (name TEXT PRIMARY KEY); +INSERT OR IGNORE INTO departments (name) VALUES ('Security'), ('IT'), ('Privacy'), ('Legal'), ('Compliance'); + +CREATE TABLE IF NOT EXISTS sla_policies ( + domain TEXT NOT NULL, + severity TEXT NOT NULL, + days_to_triage INTEGER NOT NULL DEFAULT 3, + days_to_remediate INTEGER NOT NULL, + max_extensions INTEGER NOT NULL DEFAULT 3, + PRIMARY KEY (domain, severity), + FOREIGN KEY(domain) REFERENCES domains(name) ON DELETE CASCADE + ); + +INSERT OR IGNORE INTO sla_policies (domain, severity, days_to_triage, days_to_remediate, max_extensions) VALUES + ('Vulnerability', 'Critical', 3, 14, 1), ('Vulnerability', 'High', 3, 30, 2), + ('Privacy', 'Critical', 3, 3, 0), ('Privacy', 'High', 3, 7, 1), + ('Incident', 
'Critical', 3, 1, 0); + +CREATE TABLE IF NOT EXISTS users ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + email TEXT UNIQUE NOT NULL, + password_hash TEXT NOT NULL, + full_name TEXT NOT NULL, + global_role TEXT NOT NULL CHECK(global_role IN ('Sheriff', 'RangeHand', 'Wrangler', 'CircuitRider', 'Magistrate')), + department TEXT NOT NULL DEFAULT 'Security', + is_active BOOLEAN DEFAULT 1, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY(department) REFERENCES departments(name) ON DELETE SET DEFAULT + ); + +CREATE TABLE IF NOT EXISTS sessions ( + session_token TEXT PRIMARY KEY, + user_id INTEGER NOT NULL, + expires_at DATETIME NOT NULL, + FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE + ); + +CREATE TABLE IF NOT EXISTS tickets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + domain TEXT NOT NULL DEFAULT 'Vulnerability', + source TEXT NOT NULL DEFAULT 'Manual', + asset_identifier TEXT NOT NULL DEFAULT 'Default', + cve_id TEXT, + audit_id TEXT UNIQUE, + compliance_tags TEXT, + title TEXT NOT NULL, + description TEXT, + recommended_remediation TEXT, + severity TEXT NOT NULL, + status TEXT DEFAULT 'Waiting to be Triaged' + CHECK(status IN ( + 'Waiting to be Triaged', + 'Returned to Security', + 'Triaged', + 'Assigned Out', + 'Patched', + 'False Positive' +)), + dedupe_hash TEXT UNIQUE NOT NULL, + patch_evidence TEXT, + accessible_to_internet BOOLEAN DEFAULT 0, + assignee TEXT DEFAULT 'Unassigned', + latest_comment TEXT DEFAULT '', + + assigned_at DATETIME, + owner_viewed_at DATETIME, + triage_due_date DATETIME, + remediation_due_date DATETIME, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + patched_at DATETIME, + FOREIGN KEY(domain) REFERENCES domains(name) ON DELETE SET DEFAULT + ); + +CREATE INDEX IF NOT EXISTS idx_tickets_status ON tickets(status); +CREATE INDEX IF NOT EXISTS idx_tickets_severity ON tickets(severity); +CREATE INDEX IF NOT EXISTS idx_tickets_domain ON tickets(domain); +CREATE INDEX IF 
NOT EXISTS idx_tickets_source_asset ON tickets(source, asset_identifier); + +CREATE TABLE IF NOT EXISTS ticket_assignments ( + ticket_id INTEGER NOT NULL, + assignee TEXT NOT NULL, + role TEXT NOT NULL CHECK(role IN ('RangeHand', 'Wrangler', 'Magistrate')), + PRIMARY KEY (ticket_id, assignee, role), + FOREIGN KEY(ticket_id) REFERENCES tickets(id) ON DELETE CASCADE + ); + +CREATE TABLE IF NOT EXISTS data_adapters ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL UNIQUE, + source_name TEXT NOT NULL, + findings_path TEXT NOT NULL DEFAULT '.', + mapping_title TEXT NOT NULL, + mapping_asset TEXT NOT NULL, + mapping_severity TEXT NOT NULL, + mapping_description TEXT, + mapping_remediation TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS sync_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + source TEXT NOT NULL, + status TEXT NOT NULL, + records_processed INTEGER NOT NULL, + error_message TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS draft_tickets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + report_id TEXT NOT NULL, + title TEXT DEFAULT '', + description TEXT, + severity TEXT DEFAULT 'Medium', + asset_identifier TEXT DEFAULT '', + recommended_remediation TEXT DEFAULT '', + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_draft_tickets_report_id ON draft_tickets(report_id); + +CREATE INDEX IF NOT EXISTS idx_assignments_assignee ON ticket_assignments(assignee); + +CREATE INDEX IF NOT EXISTS idx_tickets_status_asset ON tickets(status, asset_identifier); +CREATE INDEX IF NOT EXISTS idx_tickets_updated_at ON tickets(updated_at); + +CREATE INDEX IF NOT EXISTS idx_tickets_analytics ON tickets(status, severity, source); +CREATE INDEX IF NOT EXISTS idx_tickets_due_dates ON tickets(status, remediation_due_date, triage_due_date); +CREATE INDEX IF NOT EXISTS idx_tickets_source_status ON tickets(source, 
status); \ No newline at end of file diff --git a/pkg/datastore/sqlite.go b/pkg/datastore/sqlite.go new file mode 100644 index 0000000..82f78f6 --- /dev/null +++ b/pkg/datastore/sqlite.go @@ -0,0 +1,17 @@ +package datastore + +import ( + "database/sql" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +type SQLiteStore struct { + DB *sql.DB +} + +var _ domain.TicketStore = (*SQLiteStore)(nil) + +func NewSQLiteStore(db *sql.DB) *SQLiteStore { + return &SQLiteStore{DB: db} +} diff --git a/pkg/datastore/sqlite_admin.go b/pkg/datastore/sqlite_admin.go new file mode 100644 index 0000000..6f9549a --- /dev/null +++ b/pkg/datastore/sqlite_admin.go @@ -0,0 +1,173 @@ +package datastore + +import ( + "context" + "time" + + domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func (s *SQLiteStore) UpdateAppConfig(ctx context.Context, config domain2.AppConfig) error { + query := ` + INSERT INTO app_config (id, timezone, business_start, business_end, default_extension_days) + VALUES (1, ?, ?, ?, ?) 
+ ON CONFLICT(id) DO UPDATE SET + timezone = excluded.timezone, + business_start = excluded.business_start, + business_end = excluded.business_end, + default_extension_days = excluded.default_extension_days + ` + _, err := s.DB.ExecContext(ctx, query, config.Timezone, config.BusinessStart, config.BusinessEnd, config.DefaultExtensionDays) + return err +} + +func (s *SQLiteStore) GetAppConfig(ctx context.Context) (domain2.AppConfig, error) { + var c domain2.AppConfig + + query := `SELECT timezone, business_start, business_end, default_extension_days, + backup_enabled, backup_interval_hours, backup_retention_days + FROM app_config WHERE id = 1` + + err := s.DB.QueryRowContext(ctx, query).Scan( + &c.Timezone, &c.BusinessStart, &c.BusinessEnd, &c.DefaultExtensionDays, + &c.Backup.Enabled, &c.Backup.IntervalHours, &c.Backup.RetentionDays, + ) + return c, err +} + +// buildSLAMap creates a fast 2D lookup table: map[Domain][Severity]Policy +func (s *SQLiteStore) buildSLAMap(ctx context.Context) (map[string]map[string]domain2.SLAPolicy, error) { + policies, err := s.GetSLAPolicies(ctx) + if err != nil { + return nil, err + } + + slaMap := make(map[string]map[string]domain2.SLAPolicy) + for _, p := range policies { + if slaMap[p.Domain] == nil { + slaMap[p.Domain] = make(map[string]domain2.SLAPolicy) + } + slaMap[p.Domain][p.Severity] = p + } + return slaMap, nil +} + +func (s *SQLiteStore) ExportSystemState(ctx context.Context) (domain2.ExportState, error) { + var state domain2.ExportState + state.Version = "1.1" + state.ExportedAt = time.Now().UTC().Format(time.RFC3339) + + config, err := s.GetAppConfig(ctx) + if err == nil { + state.AppConfig = config + } + + slas, err := s.GetSLAPolicies(ctx) + if err == nil { + state.SLAPolicies = slas + } + + users, err := s.GetAllUsers(ctx) + if err == nil { + for _, u := range users { + u.PasswordHash = "" + state.Users = append(state.Users, *u) + } + } + + adapters, err := s.GetAdapters(ctx) + if err == nil { + state.Adapters = 
adapters + } + + query := `SELECT id, domain, source, asset_identifier, title, COALESCE(description, ''), severity, status, dedupe_hash, created_at FROM tickets` + rows, err := s.DB.QueryContext(ctx, query) + if err != nil { + return state, err + } + defer rows.Close() + + for rows.Next() { + var t domain2.Ticket + if err := rows.Scan(&t.ID, &t.Domain, &t.Source, &t.AssetIdentifier, &t.Title, &t.Description, &t.Severity, &t.Status, &t.DedupeHash, &t.CreatedAt); err == nil { + state.Tickets = append(state.Tickets, t) + } + } + + return state, nil +} + +func (s *SQLiteStore) UpdateBackupPolicy(ctx context.Context, policy domain2.BackupPolicy) error { + _, err := s.DB.ExecContext(ctx, ` + UPDATE app_config + SET backup_enabled = ?, backup_interval_hours = ?, backup_retention_days = ? + WHERE id = 1`, + policy.Enabled, policy.IntervalHours, policy.RetentionDays) + return err +} + +func (s *SQLiteStore) GetSLAPolicies(ctx context.Context) ([]domain2.SLAPolicy, error) { + rows, err := s.DB.QueryContext(ctx, "SELECT domain, severity, days_to_remediate, max_extensions, days_to_triage FROM sla_policies ORDER BY domain, severity") + if err != nil { + return nil, err + } + defer rows.Close() + + var policies []domain2.SLAPolicy + for rows.Next() { + var p domain2.SLAPolicy + rows.Scan(&p.Domain, &p.Severity, &p.DaysToRemediate, &p.MaxExtensions, &p.DaysToTriage) + policies = append(policies, p) + } + return policies, nil +} + +func (s *SQLiteStore) UpdateSLAPolicies(ctx context.Context, slas []domain2.SLAPolicy) error { + tx, err := s.DB.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + stmt, err := tx.PrepareContext(ctx, ` + UPDATE sla_policies + SET days_to_triage = ?, days_to_remediate = ?, max_extensions = ? + WHERE domain = ? 
AND severity = ?`) + if err != nil { + return err + } + defer stmt.Close() + + for _, sla := range slas { + _, err = stmt.ExecContext(ctx, sla.DaysToTriage, sla.DaysToRemediate, sla.MaxExtensions, sla.Domain, sla.Severity) + if err != nil { + return err + } + } + + return tx.Commit() +} + +func (s *SQLiteStore) GetWranglers(ctx context.Context) ([]domain2.User, error) { + query := ` + SELECT id, email, full_name, global_role, is_active, created_at + FROM users + WHERE global_role = 'Wrangler' AND is_active = 1 + ORDER BY email ASC + ` + rows, err := s.DB.QueryContext(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + var wranglers []domain2.User + for rows.Next() { + var w domain2.User + if err := rows.Scan(&w.ID, &w.Email, &w.FullName, &w.GlobalRole, &w.IsActive, &w.CreatedAt); err != nil { + return nil, err + } + wranglers = append(wranglers, w) + } + return wranglers, nil +} diff --git a/pkg/datastore/sqlite_analytics.go b/pkg/datastore/sqlite_analytics.go new file mode 100644 index 0000000..8ff7d6a --- /dev/null +++ b/pkg/datastore/sqlite_analytics.go @@ -0,0 +1,357 @@ +package datastore + +import ( + "context" + "fmt" + "time" + + domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func (s *SQLiteStore) GetSheriffAnalytics(ctx context.Context) (domain2.SheriffAnalytics, error) { + var metrics domain2.SheriffAnalytics + + s.DB.QueryRowContext(ctx, "SELECT COUNT(*) FROM tickets WHERE is_cisa_kev = 1 AND status NOT IN ('Patched', 'Risk Accepted', 'False Positive')").Scan(&metrics.ActiveKEVs) + s.DB.QueryRowContext(ctx, "SELECT COUNT(*) FROM tickets WHERE severity = 'Critical' AND status NOT IN ('Patched', 'Risk Accepted', 'False Positive')").Scan(&metrics.OpenCriticals) + s.DB.QueryRowContext(ctx, "SELECT COUNT(*) FROM tickets WHERE remediation_due_date < CURRENT_TIMESTAMP AND status NOT IN ('Patched', 'Risk Accepted', 'False Positive')").Scan(&metrics.TotalOverdue) + + mttrQuery := ` + SELECT 
COALESCE(AVG(julianday(t.patched_at) - julianday(t.created_at)), 0) + FROM tickets t + WHERE t.status = 'Patched' + ` + var mttrFloat float64 + s.DB.QueryRowContext(ctx, mttrQuery).Scan(&mttrFloat) + metrics.GlobalMTTRDays = int(mttrFloat) + + sourceQuery := ` + SELECT + t.source, + SUM(CASE WHEN t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as total_open, + SUM(CASE WHEN t.severity = 'Critical' AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as criticals, + SUM(CASE WHEN t.is_cisa_kev = 1 AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as cisa_kevs, + SUM(CASE WHEN t.status = 'Waiting to be Triaged' THEN 1 ELSE 0 END) as untriaged, + SUM(CASE WHEN t.remediation_due_date < CURRENT_TIMESTAMP AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as patch_overdue, + SUM(CASE WHEN t.status = 'Pending Risk Approval' THEN 1 ELSE 0 END) as pending_risk, + + SUM(CASE WHEN t.status IN ('Patched', 'Risk Accepted', 'False Positive') THEN 1 ELSE 0 END) as total_closed, + SUM(CASE WHEN t.status = 'Patched' THEN 1 ELSE 0 END) as patched, + SUM(CASE WHEN t.status = 'Risk Accepted' THEN 1 ELSE 0 END) as risk_accepted, + SUM(CASE WHEN t.status = 'False Positive' THEN 1 ELSE 0 END) as false_positive + FROM tickets t + GROUP BY t.source + ORDER BY criticals DESC, patch_overdue DESC + ` + rows, err := s.DB.QueryContext(ctx, sourceQuery) + if err == nil { + defer rows.Close() + for rows.Next() { + var sm domain2.SourceMetrics + rows.Scan(&sm.Source, &sm.TotalOpen, &sm.Criticals, &sm.CisaKEVs, &sm.Untriaged, &sm.PatchOverdue, &sm.PendingRisk, &sm.TotalClosed, &sm.Patched, &sm.RiskAccepted, &sm.FalsePositive) + + topAssigneeQ := ` + SELECT COALESCE(ta.assignee, 'Unassigned'), COUNT(t.id) as c + FROM tickets t LEFT JOIN ticket_assignments ta ON t.id = ta.ticket_id + WHERE t.source = ? 
AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive') + GROUP BY ta.assignee ORDER BY c DESC LIMIT 1` + + var assignee string + var count int + s.DB.QueryRowContext(ctx, topAssigneeQ, sm.Source).Scan(&assignee, &count) + if count > 0 { + sm.TopAssignee = fmt.Sprintf("%s (%d)", assignee, count) + } else { + sm.TopAssignee = "N/A" + } + + if sm.PatchOverdue > 0 { + sm.StrategicNote = "๐จ SLA Breach (Escalate to IT Managers)" + } else if sm.Untriaged > 0 { + sm.StrategicNote = "โ ๏ธ Triage Bottleneck (Check Analysts)" + } else if sm.PendingRisk > 0 { + sm.StrategicNote = "โ๏ธ Blocked by Exec Adjudication" + } else if sm.Criticals > 0 { + sm.StrategicNote = "๐ฅ High Risk (Monitor closely)" + } else if sm.RiskAccepted > sm.Patched && sm.TotalClosed > 0 { + sm.StrategicNote = "๐ High Risk Acceptance Rate (Audit Required)" + } else if sm.FalsePositive > sm.Patched && sm.TotalClosed > 0 { + sm.StrategicNote = "๐ง Noisy Source (Scanner needs tuning)" + } else if sm.TotalClosed > 0 { + sm.StrategicNote = "โ Healthy Resolution Velocity" + } else { + sm.StrategicNote = "โ Routine Processing" + } + + metrics.SourceHealth = append(metrics.SourceHealth, sm) + } + } + + sevQuery := `SELECT severity, COUNT(id) FROM tickets WHERE status NOT IN ('Patched', 'Risk Accepted', 'False Positive') GROUP BY severity` + rowsSev, err := s.DB.QueryContext(ctx, sevQuery) + if err == nil { + defer rowsSev.Close() + for rowsSev.Next() { + var sev string + var count int + rowsSev.Scan(&sev, &count) + metrics.Severity.Total += count + switch sev { + case "Critical": + metrics.Severity.Critical = count + case "High": + metrics.Severity.High = count + case "Medium": + metrics.Severity.Medium = count + case "Low": + metrics.Severity.Low = count + case "Info": + metrics.Severity.Info = count + } + } + if metrics.Severity.Total > 0 { + metrics.Severity.CritPct = int((float64(metrics.Severity.Critical) / float64(metrics.Severity.Total)) * 100) + metrics.Severity.HighPct = 
int((float64(metrics.Severity.High) / float64(metrics.Severity.Total)) * 100) + metrics.Severity.MedPct = int((float64(metrics.Severity.Medium) / float64(metrics.Severity.Total)) * 100) + metrics.Severity.LowPct = int((float64(metrics.Severity.Low) / float64(metrics.Severity.Total)) * 100) + metrics.Severity.InfoPct = int((float64(metrics.Severity.Info) / float64(metrics.Severity.Total)) * 100) + } + } + + resQuery := `SELECT status, COUNT(id) FROM tickets WHERE status IN ('Patched', 'Risk Accepted', 'False Positive') GROUP BY status` + rowsRes, err := s.DB.QueryContext(ctx, resQuery) + if err == nil { + defer rowsRes.Close() + for rowsRes.Next() { + var status string + var count int + rowsRes.Scan(&status, &count) + metrics.Resolution.Total += count + + switch status { + case "Patched": + metrics.Resolution.Patched = count + case "Risk Accepted": + metrics.Resolution.RiskAccepted = count + case "False Positive": + metrics.Resolution.FalsePositive = count + } + } + + if metrics.Resolution.Total > 0 { + metrics.Resolution.PatchedPct = int((float64(metrics.Resolution.Patched) / float64(metrics.Resolution.Total)) * 100) + metrics.Resolution.RiskAccPct = int((float64(metrics.Resolution.RiskAccepted) / float64(metrics.Resolution.Total)) * 100) + metrics.Resolution.FalsePosPct = int((float64(metrics.Resolution.FalsePositive) / float64(metrics.Resolution.Total)) * 100) + } + } + + assetQuery := `SELECT asset_identifier, COUNT(id) as c FROM tickets WHERE status NOT IN ('Patched', 'Risk Accepted', 'False Positive') GROUP BY asset_identifier ORDER BY c DESC LIMIT 5` + rowsAsset, err := s.DB.QueryContext(ctx, assetQuery) + if err == nil { + defer rowsAsset.Close() + var maxAssetCount int + for rowsAsset.Next() { + var am domain2.AssetMetric + rowsAsset.Scan(&am.Asset, &am.Count) + if maxAssetCount == 0 { + maxAssetCount = am.Count + } + if maxAssetCount > 0 { + am.Percentage = int((float64(am.Count) / float64(maxAssetCount)) * 100) + } + metrics.TopAssets = 
append(metrics.TopAssets, am)
		}
	}

	return metrics, nil
}

// GetDashboardTickets returns one page of tickets for the main dashboard,
// plus the total row count for the active filter and a map of badge counters.
// Visibility is scoped by role: Wranglers only see their own tickets.
func (s *SQLiteStore) GetDashboardTickets(ctx context.Context, tabStatus, filter, assetFilter, userEmail, userRole string, limit, offset int) ([]domain2.Ticket, int, map[string]int, error) {
	// Badge counters rendered next to the dashboard tabs; all default to zero.
	metrics := map[string]int{
		"critical":     0,
		"overdue":      0,
		"mine":         0,
		"verification": 0,
		"returned":     0,
	}

	scope := ""
	var scopeArgs []any

	// Wranglers are restricted to tickets assigned to themselves.
	if userRole == "Wrangler" {
		scope = ` AND LOWER(t.assignee) = LOWER(?)`
		scopeArgs = append(scopeArgs, userEmail)
	}

	// Sheriffs skip the per-user badge counts entirely.
	if userRole != "Sheriff" {
		var critCount, overCount, mineCount, verifyCount, returnedCount int

		// NOTE(review): Scan errors below are intentionally ignored — a failed
		// count query simply leaves the corresponding badge at zero.
		critQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.severity = 'Critical' AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')" + scope
		s.DB.QueryRowContext(ctx, critQ, scopeArgs...).Scan(&critCount)
		metrics["critical"] = critCount

		overQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.remediation_due_date < CURRENT_TIMESTAMP AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')" + scope
		s.DB.QueryRowContext(ctx, overQ, scopeArgs...).Scan(&overCount)
		metrics["overdue"] = overCount

		// "mine" is always keyed on the caller, independent of scope.
		mineQ := "SELECT COUNT(t.id) FROM tickets t WHERE LOWER(t.assignee) = LOWER(?) AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')"
		s.DB.QueryRowContext(ctx, mineQ, userEmail).Scan(&mineCount)
		metrics["mine"] = mineCount

		verifyQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.status = 'Pending Verification'" + scope
		s.DB.QueryRowContext(ctx, verifyQ, scopeArgs...).Scan(&verifyCount)
		metrics["verification"] = verifyCount

		retQ := "SELECT COUNT(t.id) FROM tickets t WHERE t.status = 'Returned to Security'" + scope
		s.DB.QueryRowContext(ctx, retQ, scopeArgs...).Scan(&returnedCount)
		metrics["returned"] = returnedCount
	}

	// Base predicate; every clause appended below uses positional args in order.
	baseQ := "FROM tickets t WHERE 1=1" + scope
	var args []any
	args = append(args, scopeArgs...)

	if assetFilter != "" {
		baseQ += " AND t.asset_identifier = ?"
		args = append(args, assetFilter)
	}

	// Tab selection maps UI tab names onto status sets.
	if tabStatus == "Waiting to be Triaged" || tabStatus == "holding_pen" {
		baseQ += " AND t.status IN ('Waiting to be Triaged', 'Returned to Security', 'Triaged')"
	} else if tabStatus == "Exceptions" {
		baseQ += " AND t.status NOT IN ('Patched', 'Risk Accepted', 'False Positive')"
	} else if tabStatus == "archives" {
		baseQ += " AND t.status IN ('Patched', 'Risk Accepted', 'False Positive')"
	} else if tabStatus != "" {
		baseQ += " AND t.status = ?"
		args = append(args, tabStatus)
	}

	// Secondary quick-filter; in the archives tab it selects a closed status.
	if filter == "critical" {
		baseQ += " AND t.severity = 'Critical'"
	} else if filter == "overdue" {
		baseQ += " AND t.remediation_due_date < CURRENT_TIMESTAMP"
	} else if filter == "mine" {
		baseQ += " AND LOWER(t.assignee) = LOWER(?)"
		args = append(args, userEmail)
	} else if tabStatus == "archives" && filter != "" && filter != "all" {
		baseQ += " AND t.status = ?"
		args = append(args, filter)
	}

	// Total before pagination; Scan error ignored (total stays 0 on failure).
	var total int
	s.DB.QueryRowContext(ctx, "SELECT COUNT(t.id) "+baseQ, args...).Scan(&total)

	// Returned tickets bubble to the top, then newest first.
	orderClause := "ORDER BY (CASE WHEN t.status = 'Returned to Security' THEN 0 ELSE 1 END) ASC, t.id DESC"

	// Paginate on IDs first, then join back for the full row payload; the
	// ORDER BY must be repeated so the final result keeps the page order.
	query := `
	WITH PaginatedIDs AS (
		SELECT t.id ` + baseQ + ` ` + orderClause + ` LIMIT ? OFFSET ?
	)
	SELECT
		t.id, t.source, t.asset_identifier, t.title, COALESCE(t.description, ''), COALESCE(t.recommended_remediation, ''), t.severity, t.status,
		t.triage_due_date, t.remediation_due_date, COALESCE(t.patch_evidence, ''),
		t.assignee as current_assignee,
		t.owner_viewed_at,
		t.updated_at,
		CAST(julianday(COALESCE(t.patched_at, t.updated_at)) - julianday(t.created_at) AS INTEGER) as days_to_resolve,
		COALESCE(t.latest_comment, '') as latest_comment
	FROM PaginatedIDs p
	JOIN tickets t ON t.id = p.id
	` + orderClause

	args = append(args, limit, offset)

	rows, err := s.DB.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, 0, metrics, err
	}
	defer rows.Close()

	var tickets []domain2.Ticket
	for rows.Next() {
		var t domain2.Ticket
		var assignee string

		err := rows.Scan(
			&t.ID, &t.Source, &t.AssetIdentifier, &t.Title, &t.Description,
			&t.RecommendedRemediation, &t.Severity, &t.Status,
			&t.TriageDueDate, &t.RemediationDueDate, &t.PatchEvidence,
			&assignee,
			&t.OwnerViewedAt,
			&t.UpdatedAt,
			&t.DaysToResolve,
			&t.LatestComment,
		)

		// Rows that fail to scan are silently dropped from the page.
		if err == nil {
			t.Assignee = assignee
			t.IsOverdue = !t.RemediationDueDate.IsZero() && t.RemediationDueDate.Before(time.Now()) && t.Status != "Patched" && t.Status != "Risk Accepted"

			// Archives show resolution time; open tabs show the due date.
			if tabStatus == "archives" {
				if t.DaysToResolve != nil {
					t.SLAString = fmt.Sprintf("%d days", *t.DaysToResolve)
				} else {
					t.SLAString = "Unknown"
				}
			} else {
				t.SLAString = t.RemediationDueDate.Format("Jan 02, 2006")
			}

			tickets = append(tickets, t)
		}
	}

	return tickets, total, metrics, nil
}

// GetGlobalActivityFeed is a community-edition stub: it returns a single
// static upsell item instead of a real audit trail.
func (s *SQLiteStore) GetGlobalActivityFeed(ctx context.Context, limit int) ([]domain2.FeedItem, error) {
	return []domain2.FeedItem{
		{
			Actor:        "System",
			ActivityType: "Info",
			NewValue:     "Detailed Immutable Audit Logging is a RiskRancher Pro feature. Upgrade to track all ticket lifecycle events.",
			TimeAgo:      "Just now",
		},
	}, nil
}

// GetAnalyticsSummary returns flat open-ticket counts keyed by
// "Total_Open", "Source_<name>_Open" and "Severity_<name>_Open".
func (s *SQLiteStore) GetAnalyticsSummary(ctx context.Context) (map[string]int, error) {
	summary := make(map[string]int)

	var total int
	// NOTE(review): unlike the dashboard queries, 'False Positive' is NOT
	// excluded here, so "open" totals can disagree with the dashboard —
	// confirm whether that is intentional.
	err := s.DB.QueryRowContext(ctx, `SELECT COUNT(*) FROM tickets WHERE status != 'Patched' AND status != 'Risk Accepted'`).Scan(&total)
	if err != nil {
		return nil, err
	}
	summary["Total_Open"] = total

	// Per-source breakdown; query errors are tolerated (keys simply absent).
	sourceRows, err := s.DB.QueryContext(ctx, `SELECT source, COUNT(*) FROM tickets WHERE status != 'Patched' AND status != 'Risk Accepted' GROUP BY source`)
	if err == nil {
		defer sourceRows.Close()
		for sourceRows.Next() {
			var source string
			var count int
			if err := sourceRows.Scan(&source, &count); err == nil {
				summary["Source_"+source+"_Open"] = count
			}
		}
	}

	// Per-severity breakdown, same tolerance for errors.
	sevRows, err := s.DB.QueryContext(ctx, `SELECT severity, COUNT(*) FROM tickets WHERE status != 'Patched' AND status != 'Risk Accepted' GROUP BY severity`)
	if err == nil {
		defer sevRows.Close()
		for sevRows.Next() {
			var sev string
			var count int
			if err := sevRows.Scan(&sev, &count); err == nil {
				summary["Severity_"+sev+"_Open"] = count
			}
		}
	}

	return summary, nil
}

// GetPaginatedActivityFeed is a community-edition stub (empty feed).
func (s *SQLiteStore) GetPaginatedActivityFeed(ctx context.Context, filter string, limit, offset int) ([]domain2.FeedItem, int, error) {
	return []domain2.FeedItem{}, 0, nil
}
diff --git a/pkg/datastore/sqlite_drafts.go b/pkg/datastore/sqlite_drafts.go
new file mode 100644
index 0000000..785427e
--- /dev/null
+++ b/pkg/datastore/sqlite_drafts.go
@@ -0,0 +1,109 @@
package datastore

import (
	"context"
	"fmt"

	domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)

// SaveDraft inserts one draft ticket row tied to an uploaded pentest report.
func (s *SQLiteStore) SaveDraft(ctx context.Context, d domain2.DraftTicket) error {
	query := `
	INSERT INTO draft_tickets (report_id, title, description, severity, asset_identifier, recommended_remediation)
	VALUES (?, ?, ?, ?, ?, ?)`

	_, err := s.DB.ExecContext(ctx, query,
		d.ReportID,
		d.Title, d.Description, d.Severity, d.AssetIdentifier, d.RecommendedRemediation)
	return err
}

// GetDraftsByReport lists all drafts for a report. Missing text columns are
// coalesced to defaults; the result is never a nil slice (JSON-friendly []).
func (s *SQLiteStore) GetDraftsByReport(ctx context.Context, reportID string) ([]domain2.DraftTicket, error) {

	query := `SELECT id, report_id, COALESCE(title, ''), COALESCE(description, ''), COALESCE(severity, 'Medium'), COALESCE(asset_identifier, ''), COALESCE(recommended_remediation, '')
	FROM draft_tickets WHERE report_id = ?`

	rows, err := s.DB.QueryContext(ctx, query, reportID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var drafts []domain2.DraftTicket
	for rows.Next() {
		var d domain2.DraftTicket
		// Rows that fail to scan are silently skipped.
		if err := rows.Scan(&d.ID, &d.ReportID, &d.Title, &d.Description, &d.Severity, &d.AssetIdentifier, &d.RecommendedRemediation); err == nil {
			drafts = append(drafts, d)
		}
	}

	if drafts == nil {
		drafts = []domain2.DraftTicket{}
	}
	return drafts, nil
}

// DeleteDraft removes a single draft by ID.
// NOTE(review): draftID is a string here but an int in UpdateDraft — confirm
// the interface intends the mismatch.
func (s *SQLiteStore) DeleteDraft(ctx context.Context, draftID string) error {
	query := `DELETE FROM draft_tickets WHERE id = ?`
	_, err := s.DB.ExecContext(ctx, query, draftID)
	return err
}

// UpdateDraft overwrites the editable fields of a draft from a ticket payload.
func (s *SQLiteStore) UpdateDraft(ctx context.Context, draftID int, payload domain2.Ticket) error {
	query := `UPDATE draft_tickets SET title = ?, severity = ?, asset_identifier = ?, description = ?, recommended_remediation = ?
	WHERE id = ?`

	_, err := s.DB.ExecContext(
		ctx,
		query,
		payload.Title,
		payload.Severity,
		payload.AssetIdentifier,
		payload.Description,
		payload.RecommendedRemediation,
		draftID,
	)

	return err
}

// PromotePentestDrafts converts edited tickets from a report into real
// tickets, assigns them to the analyst, and deletes the report's drafts —
// all inside one transaction.
func (s *SQLiteStore) PromotePentestDrafts(ctx context.Context, reportID string, analystEmail string, tickets []domain2.Ticket) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	for _, t := range tickets {
		// NOTE(review): dedupe hash is plain text here, not the SHA-256 form
		// used by CreateTicket — confirm the divergence is intentional.
		hash := fmt.Sprintf("manual-pentest-%s-%s", t.AssetIdentifier, t.Title)

		res, err := tx.ExecContext(ctx, `
		INSERT INTO tickets (
			source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash,
			triage_due_date, remediation_due_date, created_at, updated_at
		)
		VALUES (?, ?, ?, ?, ?, ?, 'Waiting to be Triaged', ?, DATETIME('now', '+3 days'), DATETIME('now', '+14 days'), CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
		`, "Manual Pentest", t.AssetIdentifier, t.Title, t.Description, t.RecommendedRemediation, t.Severity, hash)
		if err != nil {
			return err
		}

		ticketID, err := res.LastInsertId()
		if err != nil {
			return err
		}

		_, err = tx.ExecContext(ctx, `
		INSERT INTO ticket_assignments (ticket_id, assignee, role)
		VALUES (?, ?, 'RangeHand')
		`, ticketID, analystEmail)
		if err != nil {
			return err
		}
	}

	_, err = tx.ExecContext(ctx, "DELETE FROM draft_tickets WHERE report_id = ?", reportID)
	if err != nil {
		return err
	}

	return tx.Commit()
}
diff --git a/pkg/datastore/sqlite_ingest.go b/pkg/datastore/sqlite_ingest.go
new file mode 100644
index 0000000..d6af8d5
--- /dev/null
+++ b/pkg/datastore/sqlite_ingest.go
@@ -0,0 +1,284 @@
package datastore

import (
	"context"
	"database/sql"
	"time"

	domain2 "epigas.gitea.cloud/RiskRancher/core/pkg/domain"
)

// IngestTickets bulk-upserts tickets through a temp staging table so a single
// INSERT ... ON CONFLICT handles deduplication.
func (s *SQLiteStore) IngestTickets(ctx context.Context, tickets []domain2.Ticket) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	_, err = tx.ExecContext(ctx, `
	CREATE TEMP TABLE IF NOT EXISTS staging_tickets (
		domain TEXT, source TEXT, asset_identifier TEXT, title TEXT,
		description TEXT, recommended_remediation TEXT, severity TEXT,
		status TEXT, dedupe_hash TEXT
	)
	`)
	if err != nil {
		return err
	}
	// Best effort: clear rows left behind by a previous run on this session.
	tx.ExecContext(ctx, `DELETE FROM staging_tickets`)

	stmt, err := tx.PrepareContext(ctx, `
	INSERT INTO staging_tickets (domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
	`)
	if err != nil {
		return err
	}

	for _, t := range tickets {
		// Apply defaults for fields the caller left empty.
		status := t.Status
		if status == "" {
			status = "Waiting to be Triaged"
		}
		domain := t.Domain
		if domain == "" {
			domain = "Vulnerability"
		}
		source := t.Source
		if source == "" {
			source = "Manual"
		}

		_, err = stmt.ExecContext(ctx, domain, source, t.AssetIdentifier, t.Title, t.Description, t.RecommendedRemediation, t.Severity, status, t.DedupeHash)
		if err != nil {
			stmt.Close()
			return err
		}
	}
	stmt.Close()

	// Upsert from staging: existing rows (same dedupe_hash) only refresh
	// description and updated_at.
	_, err = tx.ExecContext(ctx, `
	INSERT INTO tickets (domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash)
	SELECT domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash
	FROM staging_tickets
	WHERE true -- Prevents SQLite from mistaking 'ON CONFLICT' for a JOIN condition
	ON CONFLICT(dedupe_hash) DO UPDATE SET
		description = excluded.description,
		updated_at = CURRENT_TIMESTAMP
	`)
	if err != nil {
		return err
	}

	tx.ExecContext(ctx, `DROP TABLE staging_tickets`)
	return tx.Commit()
}

// GetAdapters lists every saved scanner mapping profile.
func (s *SQLiteStore) GetAdapters(ctx context.Context) ([]domain2.Adapter, error) {
	rows, err := s.DB.QueryContext(ctx, "SELECT id, name, source_name, findings_path, mapping_title, mapping_asset, mapping_severity, mapping_description, mapping_remediation FROM data_adapters")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var adapters []domain2.Adapter
	for rows.Next() {
		var a domain2.Adapter
		// Scan error ignored: a bad row yields a zero-valued Adapter.
		rows.Scan(&a.ID, &a.Name, &a.SourceName, &a.FindingsPath, &a.MappingTitle, &a.MappingAsset, &a.MappingSeverity, &a.MappingDescription, &a.MappingRemediation)
		adapters = append(adapters, a)
	}
	return adapters, nil
}

// SaveAdapter persists a new mapping profile.
func (s *SQLiteStore) SaveAdapter(ctx context.Context, a domain2.Adapter) error {
	_, err := s.DB.ExecContext(ctx, `
	INSERT INTO data_adapters (name, source_name, findings_path, mapping_title, mapping_asset, mapping_severity, mapping_description, mapping_remediation)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
		a.Name, a.SourceName, a.FindingsPath, a.MappingTitle, a.MappingAsset, a.MappingSeverity, a.MappingDescription, a.MappingRemediation)
	return err
}

// GetAdapterByID fetches a single adapter, including timestamps.
func (s *SQLiteStore) GetAdapterByID(ctx context.Context, id int) (domain2.Adapter, error) {
	var a domain2.Adapter
	query := `
	SELECT
		id, name, source_name, findings_path,
		mapping_title, mapping_asset, mapping_severity,
		IFNULL(mapping_description, ''), IFNULL(mapping_remediation, ''),
		created_at, updated_at
	FROM data_adapters
	WHERE id = ?`

	err := s.DB.QueryRowContext(ctx, query, id).Scan(
		&a.ID, &a.Name, &a.SourceName, &a.FindingsPath,
		&a.MappingTitle, &a.MappingAsset, &a.MappingSeverity,
		&a.MappingDescription, &a.MappingRemediation,
		&a.CreatedAt, &a.UpdatedAt,
	)
	return a, err
}

// DeleteAdapter removes an adapter by ID.
func (s *SQLiteStore) DeleteAdapter(ctx context.Context, id int) error {
	_, err := s.DB.ExecContext(ctx, "DELETE FROM data_adapters WHERE id = ?", id)
	return err
}

// GetAdapterByName fetches a single adapter by name (timestamps omitted).
func (s *SQLiteStore) GetAdapterByName(ctx context.Context, name string) (domain2.Adapter, error) {
	var a domain2.Adapter
	query := `
	SELECT
		id, name, source_name, findings_path,
		mapping_title, mapping_asset, mapping_severity,
		IFNULL(mapping_description, ''), IFNULL(mapping_remediation, '')
	FROM data_adapters
	WHERE name = ?`

	err := s.DB.QueryRowContext(ctx,
		query, name).Scan(
		&a.ID, &a.Name, &a.SourceName, &a.FindingsPath,
		&a.MappingTitle, &a.MappingAsset, &a.MappingSeverity,
		&a.MappingDescription, &a.MappingRemediation,
	)
	return a, err
}

// ProcessIngestionBatch diffs one scan's findings (per source+asset) against
// existing tickets and applies inserts, reopens, description updates and
// auto-closes in a single transaction.
func (s *SQLiteStore) ProcessIngestionBatch(ctx context.Context, source, asset string, incoming []domain2.Ticket) error {
	// A failed SLA lookup falls back to hard-coded defaults downstream.
	slaMap, _ := s.buildSLAMap(ctx)

	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Normalize defaults before diffing.
	for i := range incoming {
		if incoming[i].Domain == "" {
			incoming[i].Domain = "Vulnerability"
		}
		if incoming[i].Status == "" {
			incoming[i].Status = "Waiting to be Triaged"
		}
	}

	inserts, reopens, updates, closes, err := s.calculateDiffState(ctx, tx, source, asset, incoming)
	if err != nil {
		return err
	}

	if err := s.executeBatchMutations(ctx, tx, source, asset, slaMap, inserts, reopens, updates, closes); err != nil {
		return err
	}

	return tx.Commit()
}

// calculateDiffState classifies incoming findings against existing tickets
// (keyed by dedupe_hash) into inserts, reopens, description updates, and
// hashes that disappeared from the scan and should auto-close.
func (s *SQLiteStore) calculateDiffState(ctx context.Context, tx *sql.Tx, source, asset string, incoming []domain2.Ticket) (inserts, reopens, descUpdates []domain2.Ticket, autocloses []string, err error) {
	rows, err := tx.QueryContext(ctx, `SELECT dedupe_hash, status, COALESCE(description, '') FROM tickets WHERE source = ?
AND asset_identifier = ?`, source, asset) + if err != nil { + return nil, nil, nil, nil, err + } + defer rows.Close() + + type existingRecord struct{ status, description string } + existingMap := make(map[string]existingRecord) + for rows.Next() { + var hash, status, desc string + if err := rows.Scan(&hash, &status, &desc); err == nil { + existingMap[hash] = existingRecord{status: status, description: desc} + } + } + + incomingMap := make(map[string]bool) + for _, ticket := range incoming { + incomingMap[ticket.DedupeHash] = true + existing, exists := existingMap[ticket.DedupeHash] + if !exists { + inserts = append(inserts, ticket) + } else { + if existing.status == "Patched" { + reopens = append(reopens, ticket) + } + if ticket.Description != "" && ticket.Description != existing.description && existing.status != "Patched" && existing.status != "Risk Accepted" && existing.status != "False Positive" { + descUpdates = append(descUpdates, ticket) + } + } + } + + for hash, record := range existingMap { + if !incomingMap[hash] && record.status != "Patched" && record.status != "Risk Accepted" && record.status != "False Positive" { + autocloses = append(autocloses, hash) + } + } + return inserts, reopens, descUpdates, autocloses, nil +} + +func (s *SQLiteStore) executeBatchMutations(ctx context.Context, tx *sql.Tx, source, asset string, slaMap map[string]map[string]domain2.SLAPolicy, inserts, reopens, descUpdates []domain2.Ticket, autocloses []string) error { + now := time.Now() + + // A. 
Inserts + if len(inserts) > 0 { + insertStmt, err := tx.PrepareContext(ctx, `INSERT INTO tickets (source, asset_identifier, title, severity, description, status, dedupe_hash, domain, triage_due_date, remediation_due_date) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`) + if err != nil { + return err + } + defer insertStmt.Close() + + for _, t := range inserts { + daysToTriage, daysToRemediate := 3, 30 + if dMap, ok := slaMap[t.Domain]; ok { + if policy, ok := dMap[t.Severity]; ok { + daysToTriage, daysToRemediate = policy.DaysToTriage, policy.DaysToRemediate + } + } + _, err := insertStmt.ExecContext(ctx, source, asset, t.Title, t.Severity, t.Description, t.Status, t.DedupeHash, t.Domain, now.AddDate(0, 0, daysToTriage), now.AddDate(0, 0, daysToRemediate)) + if err != nil { + return err + } + } + } + + if len(reopens) > 0 { + updateStmt, _ := tx.PrepareContext(ctx, `UPDATE tickets SET status = 'Waiting to be Triaged', patched_at = NULL, triage_due_date = ?, remediation_due_date = ? WHERE dedupe_hash = ?`) + defer updateStmt.Close() + for _, t := range reopens { + updateStmt.ExecContext(ctx, now.AddDate(0, 0, 3), now.AddDate(0, 0, 30), t.DedupeHash) // Using default SLAs for fallback + } + } + + if len(descUpdates) > 0 { + descStmt, _ := tx.PrepareContext(ctx, `UPDATE tickets SET description = ? 
WHERE dedupe_hash = ?`) + defer descStmt.Close() + for _, t := range descUpdates { + descStmt.ExecContext(ctx, t.Description, t.DedupeHash) + } + } + + if len(autocloses) > 0 { + closeStmt, _ := tx.PrepareContext(ctx, `UPDATE tickets SET status = 'Patched', patched_at = CURRENT_TIMESTAMP WHERE dedupe_hash = ?`) + defer closeStmt.Close() + for _, hash := range autocloses { + closeStmt.ExecContext(ctx, hash) + } + } + + return nil +} + +func (s *SQLiteStore) LogSync(ctx context.Context, source, status string, records int, errMsg string) error { + _, err := s.DB.ExecContext(ctx, `INSERT INTO sync_logs (source, status, records_processed, error_message) VALUES (?, ?, ?, ?)`, source, status, records, errMsg) + return err +} + +func (s *SQLiteStore) GetRecentSyncLogs(ctx context.Context, limit int) ([]domain2.SyncLog, error) { + rows, err := s.DB.QueryContext(ctx, `SELECT id, source, status, records_processed, IFNULL(error_message, ''), created_at FROM sync_logs ORDER BY id DESC LIMIT ?`, limit) + if err != nil { + return nil, err + } + defer rows.Close() + var logs []domain2.SyncLog + for rows.Next() { + var l domain2.SyncLog + rows.Scan(&l.ID, &l.Source, &l.Status, &l.RecordsProcessed, &l.ErrorMessage, &l.CreatedAt) + logs = append(logs, l) + } + return logs, nil +} diff --git a/pkg/datastore/sqlite_tickets.go b/pkg/datastore/sqlite_tickets.go new file mode 100644 index 0000000..7d243a6 --- /dev/null +++ b/pkg/datastore/sqlite_tickets.go @@ -0,0 +1,131 @@ +package datastore + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func (s *SQLiteStore) GetTickets(ctx context.Context) ([]domain.Ticket, error) { + rows, err := s.DB.QueryContext(ctx, "SELECT id, title, severity, status FROM tickets LIMIT 100") + if err != nil { + return nil, err + } + defer rows.Close() + + var tickets []domain.Ticket + for rows.Next() { + var t domain.Ticket + rows.Scan(&t.ID, &t.Title, &t.Severity, &t.Status) + 
		tickets = append(tickets, t)
	}
	return tickets, nil
}

// CreateTicket inserts a manually filed ticket, applying defaults for blank
// fields and computing a SHA-256 dedupe hash. On success the new row ID is
// written back into t.ID.
func (s *SQLiteStore) CreateTicket(ctx context.Context, t *domain.Ticket) error {
	if t.Status == "" {
		t.Status = "Waiting to be Triaged"
	}
	if t.Domain == "" {
		t.Domain = "Vulnerability"
	}
	if t.Source == "" {
		t.Source = "Manual"
	}
	if t.AssetIdentifier == "" {
		t.AssetIdentifier = "Default"
	}

	// NOTE(review): severity participates in the hash, so re-filing the same
	// finding at a different severity creates a second ticket — confirm.
	rawHash := fmt.Sprintf("%s-%s-%s-%s", t.Source, t.AssetIdentifier, t.Title, t.Severity)
	hashBytes := sha256.Sum256([]byte(rawHash))
	t.DedupeHash = hex.EncodeToString(hashBytes[:])

	// Fixed 3-day triage / 14-day remediation SLAs for manual tickets.
	query := `
	INSERT INTO tickets (
		domain, source, asset_identifier, title, description, recommended_remediation,
		severity, status, dedupe_hash,
		triage_due_date, remediation_due_date, created_at, updated_at
	) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now', '+3 days'), DATETIME('now', '+14 days'), CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
	`

	res, err := s.DB.ExecContext(ctx, query,
		t.Domain, t.Source, t.AssetIdentifier, t.Title, t.Description, t.RecommendedRemediation,
		t.Severity, t.Status, t.DedupeHash,
	)

	if err != nil {
		return err
	}

	id, _ := res.LastInsertId()
	t.ID = int(id)
	return nil
}

// UpdateTicketInline handles a single UI edit and updates the flattened comment tracking.
// An empty comment leaves latest_comment untouched (the CASE expression).
func (s *SQLiteStore) UpdateTicketInline(ctx context.Context, ticketID int, severity, description, remediation, comment, actor, status, assignee string) error {
	query := `
	UPDATE tickets
	SET severity = ?, description = ?, recommended_remediation = ?,
	    status = ?, assignee = ?,
	    latest_comment = CASE WHEN ? != '' THEN ? ELSE latest_comment END,
	    updated_at = CURRENT_TIMESTAMP
	WHERE id = ?`

	// Prefix the comment with the acting user, e.g. "[alice@x] looks fixed".
	formattedComment := ""
	if comment != "" {
		formattedComment = "[" + actor + "] " + comment
	}

	_, err := s.DB.ExecContext(ctx, query, severity, description, remediation, status, assignee, formattedComment, formattedComment, ticketID)
	return err
}

// RejectTicketFromWrangler puts a ticket back into the Holding Pen:
// status becomes 'Returned to Security' and the assignee is cleared.
func (s *SQLiteStore) RejectTicketFromWrangler(ctx context.Context, ticketIDs []int, reason, comment string) error {
	tx, err := s.DB.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	for _, id := range ticketIDs {
		fullComment := "[Wrangler Reject: " + reason + "] " + comment
		_, err := tx.ExecContext(ctx, "UPDATE tickets SET status = 'Returned to Security', assignee = 'Unassigned', latest_comment = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?", fullComment, id)
		if err != nil {
			return err
		}
	}
	return tx.Commit()
}

// GetTicketByID loads one ticket and parses its timestamp columns.
func (s *SQLiteStore) GetTicketByID(ctx context.Context, id int) (domain.Ticket, error) {
	var t domain.Ticket
	var triageDue, remDue, created, updated string
	var patchedAt *string

	query := `SELECT id, domain, source, asset_identifier, title, description, recommended_remediation, severity, status, dedupe_hash, triage_due_date, remediation_due_date, created_at, updated_at, patched_at, assignee, latest_comment FROM tickets WHERE id = ?`

	err := s.DB.QueryRowContext(ctx, query, id).Scan(
		&t.ID, &t.Domain, &t.Source, &t.AssetIdentifier, &t.Title, &t.Description, &t.RecommendedRemediation, &t.Severity, &t.Status, &t.DedupeHash, &triageDue, &remDue, &created, &updated, &patchedAt, &t.Assignee, &t.LatestComment,
	)
	if err != nil {
		return t, err
	}

	// NOTE(review): rows written via DATETIME('now', ...) are stored as
	// 'YYYY-MM-DD HH:MM:SS', which RFC3339 parsing rejects; parse errors are
	// discarded here, leaving zero times — confirm the stored format.
	t.TriageDueDate, _ = time.Parse(time.RFC3339, triageDue)
	t.RemediationDueDate, _ = time.Parse(time.RFC3339, remDue)
	t.CreatedAt, _ = time.Parse(time.RFC3339, created)
	t.UpdatedAt, _ = time.Parse(time.RFC3339, updated)

	if patchedAt != nil {
		pTime, _ := time.Parse(time.RFC3339, *patchedAt)
		t.PatchedAt = &pTime
	}

	return t, nil
}
diff --git a/pkg/domain/adapter.go b/pkg/domain/adapter.go
new file mode 100644
index 0000000..700cdae
--- /dev/null
+++ b/pkg/domain/adapter.go
@@ -0,0 +1,16 @@
package domain

// Adapter represents a saved mapping profile for a specific scanner
type Adapter struct {
	ID                 int    `json:"id"`
	Name               string `json:"name"`
	SourceName         string `json:"source_name"`
	FindingsPath       string `json:"findings_path"`
	MappingTitle       string `json:"mapping_title"`
	MappingAsset       string `json:"mapping_asset"`
	MappingSeverity    string `json:"mapping_severity"`
	MappingDescription string `json:"mapping_description"`
	MappingRemediation string `json:"mapping_remediation"`
	CreatedAt          string `json:"created_at"`
	UpdatedAt          string `json:"updated_at"`
}
diff --git a/pkg/domain/analytics.go b/pkg/domain/analytics.go
new file mode 100644
index 0000000..099c43b
--- /dev/null
+++ b/pkg/domain/analytics.go
@@ -0,0 +1,74 @@
package domain

// ResolutionMetrics summarizes closed tickets by outcome, with percentages.
type ResolutionMetrics struct {
	Total         int
	Patched       int
	RiskAccepted  int
	FalsePositive int
	PatchedPct    int
	RiskAccPct    int
	FalsePosPct   int
}

// SheriffAnalytics is the executive dashboard payload.
type SheriffAnalytics struct {
	ActiveKEVs     int
	GlobalMTTRDays int
	OpenCriticals  int
	TotalOverdue   int
	SourceHealth   []SourceMetrics
	Resolution     ResolutionMetrics
	Severity       SeverityMetrics
	TopAssets      []AssetMetric
}

// SourceMetrics is per-scanner health, including a computed strategic note.
type SourceMetrics struct {
	Source        string
	TotalOpen     int
	Criticals     int
	CisaKEVs      int
	Untriaged     int
	PatchOverdue  int
	PendingRisk   int
	TotalClosed   int
	Patched       int
	RiskAccepted  int
	FalsePositive int
	TopAssignee   string
	StrategicNote string
}

// FeedItem is one entry in an activity feed.
type FeedItem struct {
	Actor        string
	ActivityType string
	NewValue     string
	TimeAgo      string
}

// SeverityMetrics breaks open tickets down by severity, with percentages.
type SeverityMetrics struct {
	Critical int
	High     int
	Medium   int
	Low      int
	Info     int
	Total    int
	CritPct  int
	HighPct  int
	MedPct   int
	LowPct   int
	InfoPct  int
}

// AssetMetric is one entry in the "top assets by open findings" list;
// Percentage is relative to the busiest asset.
type AssetMetric struct {
	Asset      string
	Count      int
	Percentage int
}

// SyncLog records the outcome of one ingestion run.
type SyncLog struct {
	ID               int    `json:"id"`
	Source           string `json:"source"`
	Status           string `json:"status"`
	RecordsProcessed int    `json:"records_processed"`
	ErrorMessage     string `json:"error_message"`
	CreatedAt        string `json:"created_at"`
}
diff --git a/pkg/domain/auth.go b/pkg/domain/auth.go
new file mode 100644
index 0000000..e8d9072
--- /dev/null
+++ b/pkg/domain/auth.go
@@ -0,0 +1,18 @@
package domain

import "time"

// User is an application account; PasswordHash is never serialized to JSON.
type User struct {
	ID           int       `json:"id"`
	Email        string    `json:"email"`
	FullName     string    `json:"full_name"`
	PasswordHash string    `json:"-"`
	GlobalRole   string    `json:"global_role"`
	IsActive     bool      `json:"is_active"`
	CreatedAt    time.Time `json:"created_at"`
}

// Session is a server-side login session identified by an opaque token.
type Session struct {
	Token     string    `json:"token"`
	UserID    int       `json:"user_id"`
	ExpiresAt time.Time `json:"expires_at"`
}
diff --git a/pkg/domain/config.go b/pkg/domain/config.go
new file mode 100644
index 0000000..77e66c7
--- /dev/null
+++ b/pkg/domain/config.go
@@ -0,0 +1,15 @@
package domain

// AppConfig holds global runtime settings.
type AppConfig struct {
	Timezone             string       `json:"timezone"`
	BusinessStart        int          `json:"business_start"`
	BusinessEnd          int          `json:"business_end"`
	DefaultExtensionDays int          `json:"default_extension_days"`
	Backup               BackupPolicy `json:"backup"`
}

// BackupPolicy configures automatic database backups.
type BackupPolicy struct {
	Enabled       bool `json:"enabled"`
	IntervalHours int  `json:"interval_hours"`
	RetentionDays int  `json:"retention_days"`
}
diff --git a/pkg/domain/connector.go b/pkg/domain/connector.go
new file mode 100644
index 0000000..9dbbcc0
--- /dev/null
+++ b/pkg/domain/connector.go
@@ -0,0 +1,16 @@
package domain

// ConnectorTemplate defines how to translate third-party JSON into ticket format
type ConnectorTemplate struct {
	ID                string `json:"id"`
	Name              string `json:"name"`
	SourceDefault     string `json:"source_default"`
	FindingsArrayPath string `json:"findings_array_path"`
	FieldMappings     struct {
		Title                  string `json:"title"`
		AssetIdentifier        string `json:"asset_identifier"`
		Severity               string `json:"severity"`
		Description            string `json:"description"`
		RecommendedRemediation string `json:"recommended_remediation"`
	} `json:"field_mappings"`
}
diff --git a/pkg/domain/drafts.go b/pkg/domain/drafts.go
new file mode 100644
index 0000000..108a3d0
--- /dev/null
+++ b/pkg/domain/drafts.go
@@ -0,0 +1,11 @@
package domain

// DraftTicket is a pre-triage finding extracted from an uploaded report.
type DraftTicket struct {
	ID                     int    `json:"id"`
	ReportID               string `json:"report_id"`
	Title                  string `json:"title"`
	Description            string `json:"description"`
	Severity               string `json:"severity"`
	AssetIdentifier        string `json:"asset_identifier"`
	RecommendedRemediation string `json:"recommended_remediation"`
}
diff --git a/pkg/domain/export.go b/pkg/domain/export.go
new file mode 100644
index 0000000..3888691
--- /dev/null
+++ b/pkg/domain/export.go
@@ -0,0 +1,11 @@
package domain

// ExportState is the full-system export/import envelope.
type ExportState struct {
	AppConfig   AppConfig   `json:"app_config"`
	SLAPolicies []SLAPolicy `json:"sla_policies"`
	Users       []User      `json:"users"`
	Adapters    []Adapter   `json:"adapters"`
	Tickets     []Ticket    `json:"tickets"`
	Version     string      `json:"export_version"`
	ExportedAt  string      `json:"exported_at"`
}
diff --git a/pkg/domain/store.go b/pkg/domain/store.go
new file mode 100644
index 0000000..4a3af16
--- /dev/null
+++ b/pkg/domain/store.go
@@ -0,0 +1,95 @@
package domain

import (
	"context"
	"net/http" // NOTE(review): no use of net/http is visible in this chunk — confirm it is needed
	"time"
)

// Store embeds all sub interfaces for Core
type Store interface {
	TicketStore
	IdentityStore
	IngestStore
	ConfigStore
	AnalyticsStore
	DraftStore
}

// TicketStore: Core CRUD and Workflow
type TicketStore interface {
	GetTickets(ctx context.Context) ([]Ticket, error)
	GetDashboardTickets(ctx context.Context, tabStatus, filter, assetFilter, userEmail, userRole string, limit, offset int) ([]Ticket, int, map[string]int, error)
	CreateTicket(ctx context.Context, t *Ticket) error
	GetTicketByID(ctx context.Context, id int) (Ticket, error)
	UpdateTicketInline(ctx context.Context, ticketID int, severity,
description, remediation, comment, actor, status, assignee string) error +} + +// IdentityStore: Users, Sessions, and Dispatching +type IdentityStore interface { + CreateUser(ctx context.Context, email, fullName, passwordHash, globalRole string) (*User, error) + GetUserByEmail(ctx context.Context, email string) (*User, error) + GetUserByID(ctx context.Context, id int) (*User, error) + GetAllUsers(ctx context.Context) ([]*User, error) + GetUserCount(ctx context.Context) (int, error) + UpdateUserPassword(ctx context.Context, id int, newPasswordHash string) error + UpdateUserRole(ctx context.Context, id int, newRole string) error + DeactivateUserAndReassign(ctx context.Context, userID int) error + + CreateSession(ctx context.Context, token string, userID int, expiresAt time.Time) error + GetSession(ctx context.Context, token string) (*Session, error) + DeleteSession(ctx context.Context, token string) error + + GetWranglers(ctx context.Context) ([]User, error) +} + +// IngestStore: Scanners, Adapters, and Sync History +type IngestStore interface { + IngestTickets(ctx context.Context, tickets []Ticket) error + ProcessIngestionBatch(ctx context.Context, source string, assetIdentifier string, incoming []Ticket) error + + GetAdapters(ctx context.Context) ([]Adapter, error) + GetAdapterByID(ctx context.Context, id int) (Adapter, error) + GetAdapterByName(ctx context.Context, name string) (Adapter, error) + SaveAdapter(ctx context.Context, adapter Adapter) error + DeleteAdapter(ctx context.Context, id int) error + + LogSync(ctx context.Context, source, status string, records int, errMsg string) error + GetRecentSyncLogs(ctx context.Context, limit int) ([]SyncLog, error) +} + +// ConfigStore: Global System Settings +type ConfigStore interface { + GetAppConfig(ctx context.Context) (AppConfig, error) + UpdateAppConfig(ctx context.Context, config AppConfig) error + GetSLAPolicies(ctx context.Context) ([]SLAPolicy, error) + UpdateSLAPolicies(ctx context.Context, slas []SLAPolicy) 
error + UpdateBackupPolicy(ctx context.Context, policy BackupPolicy) error + ExportSystemState(ctx context.Context) (ExportState, error) +} + +// AnalyticsStore: Audit Logs and KPI Metrics +type AnalyticsStore interface { + GetSheriffAnalytics(ctx context.Context) (SheriffAnalytics, error) + GetAnalyticsSummary(ctx context.Context) (map[string]int, error) + GetGlobalActivityFeed(ctx context.Context, limit int) ([]FeedItem, error) + GetPaginatedActivityFeed(ctx context.Context, filter string, limit int, offset int) ([]FeedItem, int, error) +} + +// DraftStore: The Pentest Desk OSS, word docx +type DraftStore interface { + SaveDraft(ctx context.Context, draft DraftTicket) error + GetDraftsByReport(ctx context.Context, reportID string) ([]DraftTicket, error) + DeleteDraft(ctx context.Context, draftID string) error + UpdateDraft(ctx context.Context, draftID int, payload Ticket) error + PromotePentestDrafts(ctx context.Context, reportID string, analystEmail string, tickets []Ticket) error +} + +type Authenticator interface { + Middleware(next http.Handler) http.Handler +} + +type SLACalculator interface { + CalculateDueDate(severity string) *time.Time + CalculateTrueSLAHours(ctx context.Context, ticketID int, store Store) (float64, error) +} diff --git a/pkg/domain/ticket.go b/pkg/domain/ticket.go new file mode 100644 index 0000000..2b4d85b --- /dev/null +++ b/pkg/domain/ticket.go @@ -0,0 +1,61 @@ +package domain + +import ( + "time" +) + +// SLAPolicy represents the global SLA configuration per severity +type SLAPolicy struct { + Domain string `json:"domain"` + Severity string `json:"severity"` + DaysToRemediate int `json:"days_to_remediate"` + MaxExtensions int `json:"max_extensions"` + DaysToTriage int `json:"days_to_triage"` +} + +// AssetRiskSummary holds the rolled-up vulnerability counts for a single asset +type AssetRiskSummary struct { + AssetIdentifier string + TotalActive int + Critical int + High int + Medium int + Low int + Info int +} + +type Ticket struct 
{ + ID int `json:"id"` + Domain string `json:"domain"` + IsOverdue bool `json:"is_overdue"` + DaysToResolve *int `json:"days_to_resolve"` + Source string `json:"source"` + AssetIdentifier string `json:"asset_identifier"` + Title string `json:"title"` + Description string `json:"description"` + RecommendedRemediation string `json:"recommended_remediation"` + Severity string `json:"severity"` + Status string `json:"status"` + + DedupeHash string `json:"dedupe_hash"` + + PatchEvidence *string `json:"patch_evidence"` + OwnerViewedAt *time.Time `json:"owner_viewed_at"` + + TriageDueDate time.Time `json:"triage_due_date"` + RemediationDueDate time.Time `json:"remediation_due_date"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + PatchedAt *time.Time `json:"patched_at"` + + SLAString string `json:"sla_string"` + Assignee string `json:"assignee"` + LatestComment string `json:"latest_comment"` +} + +// TicketAssignment represents the many-to-many relationship +type TicketAssignment struct { + TicketID int `json:"ticket_id"` + Assignee string `json:"assignee"` + Role string `json:"role"` +} diff --git a/pkg/ingest/handler.go b/pkg/ingest/handler.go new file mode 100644 index 0000000..edcb33a --- /dev/null +++ b/pkg/ingest/handler.go @@ -0,0 +1,13 @@ +package ingest + +import ( + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +type Handler struct { + Store domain.Store +} + +func NewHandler(store domain.Store) *Handler { + return &Handler{Store: store} +} diff --git a/pkg/ingest/ingest.go b/pkg/ingest/ingest.go new file mode 100644 index 0000000..3e7f678 --- /dev/null +++ b/pkg/ingest/ingest.go @@ -0,0 +1,163 @@ +package ingest + +import ( + "crypto/sha256" + "encoding/csv" + "encoding/hex" + "encoding/json" + "log" + "net/http" + "strconv" + + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func (h *Handler) HandleIngest(w http.ResponseWriter, r *http.Request) { + decoder := json.NewDecoder(r.Body) + _, err := 
decoder.Token() + if err != nil { + http.Error(w, "Invalid JSON payload: expected array", http.StatusBadRequest) + return + } + + type groupKey struct { + Source string + Asset string + } + groupedTickets := make(map[groupKey][]domain.Ticket) + for decoder.More() { + var ticket domain.Ticket + if err := decoder.Decode(&ticket); err != nil { + http.Error(w, "Error parsing ticket object", http.StatusBadRequest) + return + } + + if ticket.Status == "" { + ticket.Status = "Waiting to be Triaged" + } + + if ticket.DedupeHash == "" { + hashInput := ticket.Source + "|" + ticket.AssetIdentifier + "|" + ticket.Title + hash := sha256.Sum256([]byte(hashInput)) + ticket.DedupeHash = hex.EncodeToString(hash[:]) + } + + key := groupKey{ + Source: ticket.Source, + Asset: ticket.AssetIdentifier, + } + groupedTickets[key] = append(groupedTickets[key], ticket) + } + + _, err = decoder.Token() + if err != nil { + http.Error(w, "Invalid JSON payload termination", http.StatusBadRequest) + return + } + + for key, batch := range groupedTickets { + err := h.Store.ProcessIngestionBatch(r.Context(), key.Source, key.Asset, batch) + if err != nil { + log.Printf("๐ฅ Ingestion DB Error for Asset %s: %v", key.Asset, err) + h.Store.LogSync(r.Context(), key.Source, "Failed", len(batch), err.Error()) + http.Error(w, "Database error processing batch", http.StatusInternalServerError) + return + } else { + h.Store.LogSync(r.Context(), key.Source, "Success", len(batch), "") + } + } + + w.WriteHeader(http.StatusCreated) +} + +func (h *Handler) HandleCSVIngest(w http.ResponseWriter, r *http.Request) { + if err := r.ParseMultipartForm(10 << 20); err != nil { + http.Error(w, "Failed to parse form", http.StatusBadRequest) + return + } + + adapterIDStr := r.FormValue("adapter_id") + adapterID, err := strconv.Atoi(adapterIDStr) + if err != nil { + http.Error(w, "Invalid adapter_id", http.StatusBadRequest) + return + } + + adapter, err := h.Store.GetAdapterByID(r.Context(), adapterID) + if err != nil { + 
http.Error(w, "Adapter mapping not found", http.StatusNotFound) + return + } + + file, _, err := r.FormFile("file") + if err != nil { + http.Error(w, "Failed to read file payload", http.StatusBadRequest) + return + } + defer file.Close() + + reader := csv.NewReader(file) + records, err := reader.ReadAll() + if err != nil || len(records) < 2 { + http.Error(w, "Invalid or empty CSV format", http.StatusBadRequest) + return + } + + headers := records[0] + headerMap := make(map[string]int) + for i, h := range headers { + headerMap[h] = i + } + + type groupKey struct { + Source string + Asset string + } + groupedTickets := make(map[groupKey][]domain.Ticket) + + for _, row := range records[1:] { + ticket := domain.Ticket{ + Source: adapter.SourceName, + Status: "Waiting to be Triaged", + } + + if idx, ok := headerMap[adapter.MappingTitle]; ok && idx < len(row) { + ticket.Title = row[idx] + } + if idx, ok := headerMap[adapter.MappingAsset]; ok && idx < len(row) { + ticket.AssetIdentifier = row[idx] + } + if idx, ok := headerMap[adapter.MappingSeverity]; ok && idx < len(row) { + ticket.Severity = row[idx] + } + if idx, ok := headerMap[adapter.MappingDescription]; ok && idx < len(row) { + ticket.Description = row[idx] + } + if adapter.MappingRemediation != "" { + if idx, ok := headerMap[adapter.MappingRemediation]; ok && idx < len(row) { + ticket.RecommendedRemediation = row[idx] + } + } + + if ticket.Title != "" && ticket.AssetIdentifier != "" { + hashInput := ticket.Source + "|" + ticket.AssetIdentifier + "|" + ticket.Title + hash := sha256.Sum256([]byte(hashInput)) + ticket.DedupeHash = hex.EncodeToString(hash[:]) + key := groupKey{Source: ticket.Source, Asset: ticket.AssetIdentifier} + groupedTickets[key] = append(groupedTickets[key], ticket) + } + } + + for key, batch := range groupedTickets { + err := h.Store.ProcessIngestionBatch(r.Context(), key.Source, key.Asset, batch) + if err != nil { + log.Printf("๐ฅ CSV Ingestion Error for Asset %s: %v", key.Asset, err) + 
h.Store.LogSync(r.Context(), key.Source, "Failed", len(batch), err.Error()) + http.Error(w, "Database error processing CSV batch", http.StatusInternalServerError) + return + } else { + h.Store.LogSync(r.Context(), key.Source, "Success", len(batch), "") + } + } + w.WriteHeader(http.StatusCreated) +} diff --git a/pkg/ingest/ingest_test.go b/pkg/ingest/ingest_test.go new file mode 100644 index 0000000..ee04a37 --- /dev/null +++ b/pkg/ingest/ingest_test.go @@ -0,0 +1,488 @@ +package ingest + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "fmt" + "mime/multipart" + "net/http" + "net/http/httptest" + "runtime/debug" + "testing" + "time" + + "epigas.gitea.cloud/RiskRancher/core/pkg/datastore" + "epigas.gitea.cloud/RiskRancher/core/pkg/domain" +) + +func setupTestIngest(t *testing.T) (*Handler, *sql.DB) { + db := datastore.InitDB(":memory:") + store := datastore.NewSQLiteStore(db) + return NewHandler(store), db +} + +func GetVIPCookie(store domain.Store) *http.Cookie { + + user, err := store.GetUserByEmail(context.Background(), "vip@RiskRancher.com") + if err != nil { + user, _ = store.CreateUser(context.Background(), "vip@RiskRancher.com", "Test VIP", "hash", "Sheriff") + } + + store.CreateSession(context.Background(), "vip_token_999", user.ID, time.Now().Add(1*time.Hour)) + return &http.Cookie{Name: "session_token", Value: "vip_token_999"} +} + +func TestAutoPatchMissingFindings(t *testing.T) { + app, db := setupTestIngest(t) + defer db.Close() + + payload1 := []byte(`[ + {"title": "Vuln A", "severity": "High"}, + {"title": "Vuln B", "severity": "Medium"} + ] + `) + req1 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(payload1)) + req1.AddCookie(GetVIPCookie(app.Store)) + rr1 := httptest.NewRecorder() + app.HandleIngest(rr1, req1) + + var count int + db.QueryRow("SELECT COUNT(*) FROM tickets WHERE status = 'Waiting to be Triaged'").Scan(&count) + if count != 2 { + t.Fatalf("Expected 2 unpatched tickets, got %d", count) + } + + 
payload2 := []byte(` [ + {"title": "Vuln A", "severity": "High"} + ]`) + req2 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(payload2)) + req2.AddCookie(GetVIPCookie(app.Store)) + rr2 := httptest.NewRecorder() + app.HandleIngest(rr2, req2) + + var statusB string + var patchedAt sql.NullTime + + err := db.QueryRow("SELECT status, patched_at FROM tickets WHERE title = 'Vuln B'").Scan(&statusB, &patchedAt) + if err != nil { + t.Fatalf("Failed to query Vuln B: %v", err) + } + + if statusB != "Patched" { + t.Errorf("Expected Vuln B status to be 'Patched', got '%s'", statusB) + } + + if !patchedAt.Valid { + t.Errorf("Expected Vuln B to have a patched_at timestamp, but it was NULL") + } +} + +func TestHandleIngest(t *testing.T) { + a, db := setupTestIngest(t) + defer db.Close() + + sendIngestRequest := func(findings []domain.Ticket) *httptest.ResponseRecorder { + body, _ := json.Marshal(findings) + req := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + rr := httptest.NewRecorder() + a.HandleIngest(rr, req) + return rr + } + + t.Run("1. Fresh Ingestion", func(t *testing.T) { + findings := []domain.Ticket{ + { + Source: "CrowdStrike", + AssetIdentifier: "Server-01", + Title: "Malware Detected", + Severity: "Critical", + }, + } + + rr := sendIngestRequest(findings) + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201 Created, got %d", rr.Code) + } + + var count int + db.QueryRow("SELECT COUNT(*) FROM tickets").Scan(&count) + if count != 1 { + t.Errorf("expected 1 ticket in DB, got %d", count) + } + }) + + t.Run("2. 
Deduplication", func(t *testing.T) { + time.Sleep(1 * time.Second) + + findings := []domain.Ticket{ + { + Source: "CrowdStrike", + AssetIdentifier: "Server-01", + Title: "Malware Detected", + Severity: "Critical", + Description: "Updated Description", + }, + } + + rr := sendIngestRequest(findings) + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201 Created, got %d", rr.Code) + } + + var count int + db.QueryRow("SELECT COUNT(*) FROM tickets").Scan(&count) + if count != 1 { + t.Errorf("expected still 1 ticket in DB due to dedupe, got %d", count) + } + + var desc string + db.QueryRow("SELECT description FROM tickets WHERE title = 'Malware Detected'").Scan(&desc) + if desc != "Updated Description" { + t.Errorf("expected description to update to 'Updated Description', got '%s'", desc) + } + }) + + t.Run("3. Scoped Auto-Patching", func(t *testing.T) { + findings := []domain.Ticket{ + { + Source: "CrowdStrike", + AssetIdentifier: "Server-01", + Title: "Outdated Antivirus", + Severity: "High", + }, + } + + rr := sendIngestRequest(findings) + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201 Created, got %d", rr.Code) + } + + var totalCount int + db.QueryRow("SELECT COUNT(*) FROM tickets").Scan(&totalCount) + if totalCount != 2 { + t.Errorf("expected 2 total tickets in DB, got %d", totalCount) + } + + var status string + db.QueryRow("SELECT status FROM tickets WHERE title = 'Malware Detected'").Scan(&status) + if status != "Patched" { + t.Errorf("expected missing vulnerability to be auto-patched, but status is '%s'", status) + } + }) +} + +func TestCSVIngestion(t *testing.T) { + app, db := setupTestIngest(t) + defer db.Close() + + _, err := db.Exec(` + INSERT INTO data_adapters ( + id, name, source_name, findings_path, + mapping_title, mapping_asset, mapping_severity, mapping_description, mapping_remediation + ) VALUES ( + 999, 'Legacy Scanner V1', 'LegacyScan', '.', + 'Vuln_Name', 'Server_IP', 'Risk_Level', 'Details', 'Fix_Steps' + ) + `) + if err != 
nil { + t.Fatalf("Failed to setup test adapter: %v", err) + } + + rawCSV := `Vuln_Name,Server_IP,Risk_Level,Details,Junk_Column +SQL Injection,192.168.1.50,Critical,Found in login form,ignore_this +Outdated Apache,192.168.1.50,High,Upgrade to 2.4.50,ignore_this` + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, _ := writer.CreateFormFile("file", "scan_results.csv") + part.Write([]byte(rawCSV)) + + writer.WriteField("adapter_id", "999") + writer.Close() + + req := httptest.NewRequest(http.MethodPost, "/api/ingest/csv", body) + req.Header.Set("Content-Type", writer.FormDataContentType()) + rr := httptest.NewRecorder() + + app.HandleCSVIngest(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("Expected 201 Created, got %d. Body: %s", rr.Code, rr.Body.String()) + } + + var count int + db.QueryRow("SELECT COUNT(*) FROM tickets WHERE source = 'LegacyScan'").Scan(&count) + + if count != 2 { + t.Errorf("Expected 2 tickets ingested from CSV, got %d", count) + } + + var title, severity string + db.QueryRow("SELECT title, severity FROM tickets WHERE title = 'SQL Injection'").Scan(&title, &severity) + if severity != "Critical" { + t.Errorf("CSV Mapping failed! 
Expected severity 'Critical', got '%s'", severity) + } +} + +func TestAutoPatchEdgeCases(t *testing.T) { + h, db := setupTestIngest(t) // Swapped 'app' for 'h' + defer db.Close() + + db.Exec(` + INSERT INTO tickets (source, title, severity, dedupe_hash, status) + VALUES ('App B', 'App B Vuln', 'High', 'hash-app-b', 'Waiting to be Triaged') + `) + + payload1 := []byte(`[ + {"source": "App A", "title": "Vuln 1", "severity": "High"}, + {"source": "App A", "title": "Vuln 2", "severity": "Medium"} + ]`) + req1 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(payload1)) + req1.AddCookie(GetVIPCookie(h.Store)) + req1.Header.Set("Content-Type", "application/json") + + rr1 := httptest.NewRecorder() + h.HandleIngest(rr1, req1) + + payload2 := []byte(`[ + {"source": "App A", "title": "Vuln 1", "severity": "High"} + ]`) + req2 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(payload2)) + req2.AddCookie(GetVIPCookie(h.Store)) + req2.Header.Set("Content-Type", "application/json") + + rr2 := httptest.NewRecorder() + h.HandleIngest(rr2, req2) + + var status2 string + db.QueryRow("SELECT status FROM tickets WHERE title = 'Vuln 2'").Scan(&status2) + if status2 != "Patched" { + t.Errorf("Expected Vuln 2 to be 'Patched', got '%s'", status2) + } + + var statusB string + db.QueryRow("SELECT status FROM tickets WHERE title = 'App B Vuln'").Scan(&statusB) + if statusB != "Waiting to be Triaged" { + t.Errorf("CRITICAL FAILURE: Blast radius exceeded! App B status changed to '%s'", statusB) + } +} + +func TestHandleIngest_MultiAssetDiffing(t *testing.T) { + // THE GO 1.26 GC TWEAK: Force Go to keep RAM usage under 2GB + // This makes the GC run aggressively, trading a tiny bit of CPU for massive RAM savings. 
+ previousLimit := debug.SetMemoryLimit(2 * 1024 * 1024 * 1024) + defer debug.SetMemoryLimit(previousLimit) + + a, db := setupTestIngest(t) + db.Exec(`PRAGMA synchronous = OFF;`) + defer db.Close() + + _, err := db.Exec(`INSERT INTO tickets (source, asset_identifier, title, status, severity, dedupe_hash) VALUES + ('Trivy', 'Server-A', 'Old Vuln A', 'Waiting to be Triaged', 'High', 'hash_A_1'), + ('Trivy', 'Server-B', 'Old Vuln B', 'Waiting to be Triaged', 'Critical', 'hash_B_1')`) + if err != nil { + t.Fatalf("Failed to seed database: %v", err) + } + + incomingPayload := []domain.Ticket{ + { + Source: "Trivy", + AssetIdentifier: "Server-A", + Title: "New Vuln A", + Severity: "High", + DedupeHash: "hash_A_2", + }, + } + + body, _ := json.Marshal(incomingPayload) + req := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + rr := httptest.NewRecorder() + + a.HandleIngest(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("Expected 201 Created, got %d", rr.Code) + } + + var statusA string + db.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_A_1'`).Scan(&statusA) + if statusA != "Patched" { + t.Errorf("Expected Server-A's old ticket to be Auto-Patched, got '%s'", statusA) + } + + var statusB string + db.QueryRow(`SELECT status FROM tickets WHERE dedupe_hash = 'hash_B_1'`).Scan(&statusB) + if statusB != "Waiting to be Triaged" { + t.Errorf("CRITICAL BUG: Server-B's ticket was altered! 
Expected 'Waiting to be Triaged', got '%s'", statusB) + } +} + +func TestHandleIngest_OneMillionTicketStressTest(t *testing.T) { + if testing.Short() { + t.Skip("Skipping 1-million ticket stress test in short mode") + } + + a, db := setupTestIngest(t) + defer db.Close() + + numAssets := 10000 + vulnsPerAsset := 100 + + t.Logf("Generating baseline payload for %d tickets...", numAssets*vulnsPerAsset) + + baselinePayload := make([]domain.Ticket, 0, numAssets*vulnsPerAsset) + for assetID := 1; assetID <= numAssets; assetID++ { + assetName := fmt.Sprintf("Server-%05d", assetID) + for vulnID := 1; vulnID <= vulnsPerAsset; vulnID++ { + baselinePayload = append(baselinePayload, domain.Ticket{ + Source: "HeavyLoadTester", + AssetIdentifier: assetName, + Title: fmt.Sprintf("Vulnerability-%03d", vulnID), + Severity: "High", + }) + } + } + + t.Log("Marshaling 1M tickets to JSON...") + body1, _ := json.Marshal(baselinePayload) + req1 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(body1)) + rr1 := httptest.NewRecorder() + + t.Log("Hitting API with Baseline 1M Scan...") + a.HandleIngest(rr1, req1) + + if rr1.Code != http.StatusCreated { + t.Fatalf("Baseline ingest failed with status %d", rr1.Code) + } + + var count1 int + db.QueryRow(`SELECT COUNT(*) FROM tickets`).Scan(&count1) + if count1 != 1000000 { + t.Fatalf("Expected 1,000,000 tickets inserted, got %d", count1) + } + + t.Log("Generating Diff payload...") + + diffPayload := make([]domain.Ticket, 0, numAssets*vulnsPerAsset) + for assetID := 1; assetID <= numAssets; assetID++ { + assetName := fmt.Sprintf("Server-%05d", assetID) + + for vulnID := 1; vulnID <= 80; vulnID++ { + diffPayload = append(diffPayload, domain.Ticket{ + Source: "HeavyLoadTester", + AssetIdentifier: assetName, + Title: fmt.Sprintf("Vulnerability-%03d", vulnID), + Severity: "High", + }) + } + + for vulnID := 101; vulnID <= 120; vulnID++ { + diffPayload = append(diffPayload, domain.Ticket{ + Source: "HeavyLoadTester", + 
AssetIdentifier: assetName, + Title: fmt.Sprintf("Vulnerability-%03d", vulnID), + Severity: "Critical", + }) + } + } + + t.Log("Marshaling Diff payload to JSON...") + body2, _ := json.Marshal(diffPayload) + req2 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(body2)) + rr2 := httptest.NewRecorder() + + t.Log("Hitting API with Diff 1M Scan...") + a.HandleIngest(rr2, req2) + + if rr2.Code != http.StatusCreated { + t.Fatalf("Diff ingest failed with status %d", rr2.Code) + } + + t.Log("Running Assertions...") + + var totalRows int + db.QueryRow(`SELECT COUNT(*) FROM tickets`).Scan(&totalRows) + if totalRows != 1200000 { + t.Errorf("Expected exactly 1,200,000 total rows in DB, got %d", totalRows) + } + + var patchedCount int + db.QueryRow(`SELECT COUNT(*) FROM tickets WHERE status = 'Patched'`).Scan(&patchedCount) + if patchedCount != 200000 { + t.Errorf("Expected exactly 200,000 auto-patched tickets, got %d", patchedCount) + } + + var openCount int + db.QueryRow(`SELECT COUNT(*) FROM tickets WHERE status = 'Waiting to be Triaged'`).Scan(&openCount) + if openCount != 1000000 { + t.Errorf("Expected exactly 1,000,000 open tickets, got %d", openCount) + } +} + +func TestSyncLogReceipts(t *testing.T) { + h, db := setupTestIngest(t) + defer db.Close() + db.Exec(`CREATE TABLE IF NOT EXISTS sync_logs (id INTEGER PRIMARY KEY, source TEXT, status TEXT, records_processed INTEGER, error_message TEXT)`) + + payload := []byte(`[{"source": "Dependabot", "asset_identifier": "repo-1", "title": "Vuln 1", "severity": "High"}]`) + req1 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(payload)) + req1.AddCookie(GetVIPCookie(h.Store)) + req1.Header.Set("Content-Type", "application/json") + h.HandleIngest(httptest.NewRecorder(), req1) + + badPayload := []byte(`[{"source": "Dependabot", "title": "Vuln 1", "severity": "High", "status": "GarbageStatus"}]`) + + req2 := httptest.NewRequest(http.MethodPost, "/api/ingest", bytes.NewBuffer(badPayload)) + 
req2.AddCookie(GetVIPCookie(h.Store)) + req2.Header.Set("Content-Type", "application/json") + h.HandleIngest(httptest.NewRecorder(), req2) + + var successCount, failCount, processed int + db.QueryRow("SELECT COUNT(*), MAX(records_processed) FROM sync_logs WHERE source = 'Dependabot' AND status = 'Success'").Scan(&successCount, &processed) + db.QueryRow("SELECT COUNT(*) FROM sync_logs WHERE status = 'Failed'").Scan(&failCount) + + if successCount != 1 || processed != 1 { + t.Errorf("System failed to log successful sync receipt. Got count: %d, processed: %d", successCount, processed) + } + if failCount != 1 { + t.Errorf("System failed to log failed sync receipt. Got count: %d", failCount) + } +} + +func TestUIFileDropIngestion(t *testing.T) { + h, db := setupTestIngest(t) + defer db.Close() + + res, err := db.Exec(`INSERT INTO data_adapters (name, source_name, mapping_title, mapping_asset, mapping_severity) VALUES ('UI-Tool', 'UITool', 'Name', 'Host', 'Risk')`) + if err != nil { + t.Fatalf("failed to seed adapter: %v", err) + } + adapterID, _ := res.LastInsertId() + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, _ := writer.CreateFormFile("file", "test_findings.csv") + part.Write([]byte("Name,Host,Risk\nUnauthorized Access,10.0.0.1,Critical")) + + _ = writer.WriteField("adapter_id", fmt.Sprintf("%d", adapterID)) + writer.Close() + + req := httptest.NewRequest(http.MethodPost, "/api/ingest/csv", body) + req.Header.Set("Content-Type", writer.FormDataContentType()) + req.AddCookie(GetVIPCookie(h.Store)) + rr := httptest.NewRecorder() + h.HandleCSVIngest(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201 Created, got %d: %s", rr.Code, rr.Body.String()) + } + var count int + db.QueryRow("SELECT COUNT(*) FROM tickets WHERE source = 'UITool'").Scan(&count) + if count != 1 { + t.Errorf("UI Drop failed: expected 1 ticket, got %d", count) + } +} diff --git a/pkg/report/docx_html.go b/pkg/report/docx_html.go new file mode 100644 
index 0000000..8ecf826 --- /dev/null +++ b/pkg/report/docx_html.go @@ -0,0 +1,113 @@ +package report + +import ( + "archive/zip" + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "net/http" + "path/filepath" + "strings" +) + +// Relationships maps the pkg rId to the actual media file +type Relationships struct { + XMLName xml.Name `xml:"Relationships"` + Rel []struct { + Id string `xml:"Id,attr"` + Target string `xml:"Target,attr"` + } `xml:"Relationship"` +} + +func ServeDOCXAsHTML(w http.ResponseWriter, docxPath string) { + r, err := zip.OpenReader(docxPath) + if err != nil { + http.Error(w, "Failed to open DOCX archive", http.StatusInternalServerError) + return + } + defer r.Close() + + relsMap := make(map[string]string) + for _, f := range r.File { + if f.Name == "word/_rels/document.xml.rels" { + rc, _ := f.Open() + var rels Relationships + xml.NewDecoder(rc).Decode(&rels) + rc.Close() + for _, rel := range rels.Rel { + relsMap[rel.Id] = rel.Target + } + break + } + } + + mediaMap := make(map[string]string) + for _, f := range r.File { + if strings.HasPrefix(f.Name, "word/media/") { + rc, _ := f.Open() + data, _ := io.ReadAll(rc) + rc.Close() + + ext := strings.TrimPrefix(filepath.Ext(f.Name), ".") + if ext == "jpeg" || ext == "jpg" { + ext = "jpeg" + } + b64 := base64.StdEncoding.EncodeToString(data) + mediaMap[f.Name] = fmt.Sprintf("data:image/%s;base64,%s", ext, b64) + } + } + + var htmlOutput bytes.Buffer + var inParagraph bool + + for _, f := range r.File { + if f.Name == "word/document.xml" { + rc, _ := f.Open() + decoder := xml.NewDecoder(rc) + + for { + token, err := decoder.Token() + if err != nil { + break + } + + switch se := token.(type) { + case xml.StartElement: + if se.Name.Local == "p" { + htmlOutput.WriteString("
<p>")
+ inParagraph = true
+ }
+ if se.Name.Local == "t" {
+ var text string
+ decoder.DecodeElement(&text, &se)
+ htmlOutput.WriteString(text)
+ }
+ if se.Name.Local == "blip" {
+ for _, attr := range se.Attr {
+ if attr.Name.Local == "embed" {
+ targetPath := relsMap[attr.Value]
+ fullMediaPath := "word/" + targetPath
+
+ if b64URI, exists := mediaMap[fullMediaPath]; exists {
+ imgTag := fmt.Sprintf(`
<img src="%s" />`, b64URI)
+ htmlOutput.WriteString(imgTag)
+ }
+ }
+ }
+ }
+ case xml.EndElement:
+ if se.Name.Local == "p" && inParagraph {
+ htmlOutput.WriteString("</p>")
No activity found.
`; + else { + data.feed.forEach(item => { + const badgeStr = item.NewValue ? `${item.NewValue}` : ""; + container.innerHTML += `๐จ Error: ${err.message}
`; } + } + + const logFilter = document.getElementById("logFilter"); + if(logFilter) { + logFilter.addEventListener("change", () => { currentLogPage = 1; loadLogs(); }); + document.getElementById("logPrevBtn").addEventListener("click", () => { if(currentLogPage > 1) { currentLogPage--; loadLogs(); } }); + document.getElementById("logNextBtn").addEventListener("click", () => { currentLogPage++; loadLogs(); }); + loadLogs(); + } + + // --- UI INITIALIZERS --- + document.querySelectorAll('.risk-row').forEach(row => { + const rationaleDiv = row.querySelector('.risk-rationale-cell'); + const typeCell = row.querySelector('.risk-type-cell'); + if (!rationaleDiv || !typeCell) return; + let text = rationaleDiv.innerText.trim(); + if (text.includes('[EXTENSION]')) { + typeCell.innerHTML = 'โฑ๏ธ TIME EXTENSION'; + rationaleDiv.innerText = text.replace('[EXTENSION]', '').trim(); + rationaleDiv.style.borderLeft = "3px solid #ea580c"; + } else if (text.includes('[RISK ACCEPTANCE]')) { + typeCell.innerHTML = '๐ RISK ACCEPTANCE'; + rationaleDiv.innerText = text.replace('[RISK ACCEPTANCE]', '').trim(); + rationaleDiv.style.borderLeft = "3px solid #dc2626"; + row.style.backgroundColor = "#fff5f5"; + } else { + typeCell.innerHTML = '๐ STANDARD'; + } + }); + + + // --- SLA MATRIX SAVE --- + const saveConfigBtn = document.getElementById("saveConfigBtn"); + if(saveConfigBtn) { + saveConfigBtn.addEventListener("click", async function() { + this.innerText = "Saving..."; this.disabled = true; + const payload = { + timezone: document.getElementById("configTimezone").value, + business_start: parseInt(document.getElementById("configBizStart").value), + business_end: parseInt(document.getElementById("configBizEnd").value), + default_extension_days: parseInt(document.getElementById("configDefExt").value), + slas: Array.from(document.querySelectorAll(".sla-row")).map(row => ({ + domain: row.getAttribute("data-domain"), + severity: row.querySelector("span.badge").innerText.trim(), + 
days_to_triage: parseInt(row.querySelector(".sla-triage").value), + days_to_remediate: parseInt(row.querySelector(".sla-patch").value), + max_extensions: parseInt(row.querySelector(".sla-ext").value) + })) + }; + const res = await fetch("/api/config", { method: "PUT", headers: { "Content-Type": "application/json" }, body: JSON.stringify(payload) }); + if (res.ok) { this.innerText = "Saved!"; this.style.background = "#10b981"; setTimeout(() => { this.innerText = "Save Changes"; this.style.background = ""; this.disabled = false; }, 2000); } + else { alert("Failed"); this.innerText = "Save Changes"; this.disabled = false; } + }); + } + + // SLA Domain Filter + const domainFilter = document.getElementById("slaDomainFilter"); + if (domainFilter) { + domainFilter.addEventListener("change", function() { + document.querySelectorAll(".sla-row").forEach(row => row.style.display = row.getAttribute("data-domain") === this.value ? "table-row" : "none"); + }); + domainFilter.dispatchEvent(new Event("change")); + } + + // --- MODAL EVENT LISTENERS --- + const openUserModal = document.getElementById("openUserModal"); + if (openUserModal) { + openUserModal.addEventListener("click", () => document.getElementById("userModal").style.display = "flex"); + document.getElementById("cancelUser").addEventListener("click", () => document.getElementById("userModal").style.display = "none"); + document.getElementById("submitUser").addEventListener("click", async function() { + const payload = { full_name: document.getElementById("newUserName").value, email: document.getElementById("newUserEmail").value, password: document.getElementById("newUserPassword").value, global_role: document.getElementById("newUserRole").value }; + if (!payload.full_name || !payload.email || !payload.password) return alert("Fill out all fields."); + this.disabled = true; + await fetch("/api/admin/users", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify(payload) }).then(async r => 
r.ok ? window.location.reload() : alert(await r.text())); + this.disabled = false; + }); + } + + const newRuleType = document.getElementById("newRuleType"); + if (newRuleType) { + newRuleType.addEventListener("change", function() { + document.getElementById("newRuleMatchSource").style.display = this.value === "Source" ? "block" : "none"; + document.getElementById("newRuleMatchAsset").style.display = this.value === "Source" ? "none" : "block"; + }); + document.getElementById("openRuleModal").addEventListener("click", () => document.getElementById("ruleModal").style.display = "flex"); + document.getElementById("cancelRule").addEventListener("click", () => document.getElementById("ruleModal").style.display = "none"); + document.getElementById("submitRule").addEventListener("click", async function() { + const ruleType = document.getElementById("newRuleType").value; + const matchVal = ruleType === "Source" ? document.getElementById("newRuleMatchSource").value : document.getElementById("newRuleMatchAsset").value; + const assigneeSelect = document.getElementById("newRuleAssignee"); + const selectedEmails = Array.from(assigneeSelect.selectedOptions).map(opt => opt.value).join(","); + if (!matchVal || !selectedEmails) return alert("Fill out match value and assignee."); + this.disabled = true; this.innerText = "Saving..."; + await fetch("/api/admin/routing", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ rule_type: ruleType, match_value: matchVal, assignee: selectedEmails, role: "RangeHand" }) }).then(async r => r.ok ? 
window.location.reload() : alert(await r.text())); + this.disabled = false; this.innerText = "Deploy Rule"; + }); + } +}); \ No newline at end of file diff --git a/ui/static/auth.js b/ui/static/auth.js new file mode 100644 index 0000000..6dbab17 --- /dev/null +++ b/ui/static/auth.js @@ -0,0 +1,52 @@ +document.addEventListener("DOMContentLoaded", () => { + + // --- LOGIN LOGIC --- + const loginForm = document.getElementById("loginForm"); + if (loginForm) { + loginForm.addEventListener("submit", async (e) => { + e.preventDefault(); + const btn = document.getElementById("submitBtn"); + const errDiv = document.getElementById("errorMsg"); + + btn.innerText = "Authenticating..."; btn.disabled = true; errDiv.style.display = "none"; + + try { + const res = await fetch("/api/auth/login", { + method: "POST", headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ email: document.getElementById("email").value, password: document.getElementById("password").value }) + }); + + if (res.ok) window.location.href = "/dashboard"; + else { errDiv.innerText = "Invalid credentials. 
Please try again."; errDiv.style.display = "block"; btn.innerText = "Sign In"; btn.disabled = false; } + } catch (err) { errDiv.innerText = "Network error."; errDiv.style.display = "block"; btn.innerText = "Sign In"; btn.disabled = false; } + }); + } + + // --- REGISTER LOGIC --- + const registerForm = document.getElementById("registerForm"); + if (registerForm) { + registerForm.addEventListener("submit", async (e) => { + e.preventDefault(); + const btn = document.getElementById("submitBtn"); + const errDiv = document.getElementById("errorMsg"); + + btn.innerText = "Securing System..."; btn.disabled = true; errDiv.style.display = "none"; + + const payload = { + full_name: document.getElementById("fullname").value, email: document.getElementById("email").value, + password: document.getElementById("password").value, global_role: "Sheriff" + }; + + try { + const res = await fetch("/api/auth/register", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify(payload) }); + if (res.ok) { + const loginRes = await fetch("/api/auth/login", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ email: payload.email, password: payload.password }) }); + if (loginRes.ok) window.location.href = "/dashboard"; else window.location.href = "/login"; + } else { + errDiv.innerText = await res.text() || "Registration failed. 
System might already be locked."; + errDiv.style.display = "block"; btn.innerText = "Claim Sheriff Access"; btn.disabled = false; + } + } catch (err) { errDiv.innerText = "Network error."; errDiv.style.display = "block"; btn.innerText = "Claim Sheriff Access"; btn.disabled = false; } + }); + } +}); \ No newline at end of file diff --git a/ui/static/builder.js b/ui/static/builder.js new file mode 100644 index 0000000..3c71906 --- /dev/null +++ b/ui/static/builder.js @@ -0,0 +1,198 @@ +const fileInput = document.getElementById('local-file'); +const pathInput = document.getElementById('findings_path'); +let currentRawData = null; +let isJson = false; + +fileInput.addEventListener('change', (e) => { + const file = e.target.files[0]; + if (!file) return; + isJson = file.name.toLowerCase().endsWith('.json'); + + const reader = new FileReader(); + reader.onload = (event) => { + currentRawData = event.target.result; + document.getElementById('preview-placeholder').style.display = 'none'; + + if (isJson) { + try { + const parsed = JSON.parse(currentRawData); + const guessedPath = autoDetectArrayPath(parsed); + if (guessedPath) { + pathInput.value = guessedPath; + } + } catch (e) { + console.error("Auto-detect failed:", e); + } + } + + processPreview(); + }; + reader.readAsText(file); +}); + +pathInput.addEventListener('input', () => { + if (currentRawData && isJson) processPreview(); +}); + +function autoDetectArrayPath(obj) { + if (Array.isArray(obj)) return "."; + + let bestPath = ""; + let maxLen = -1; + + function search(currentObj, currentPath) { + if (Array.isArray(currentObj)) { + if (currentObj.length > 0 && typeof currentObj[0] === 'object') { + if (currentObj.length > maxLen) { + maxLen = currentObj.length; + bestPath = currentPath; + } + } + return; + } + if (currentObj !== null && typeof currentObj === 'object') { + for (let key in currentObj) { + let nextPath = currentPath ? currentPath + "." 
+ key : key; + search(currentObj[key], nextPath); + } + } + } + + search(obj, ""); + return bestPath || "."; +} + + +function processPreview() { + let headers = []; + let rows = []; + + if (isJson) { + try { + const parsed = JSON.parse(currentRawData); + const findings = getNestedValue(parsed, pathInput.value); + + + if (!Array.isArray(findings) || findings.length === 0) { + const rawPreview = JSON.stringify(parsed, null, 2).substring(0, 1500) + "\n\n... (file truncated for preview)"; + + document.getElementById('preview-table-container').innerHTML = + `โ ๏ธ Path "${pathInput.value}" is not an array.
+Here is the structure of your file to help you find the correct path:
+${rawPreview}
+ | ${h} | `).join('') + '
|---|
| ${cell} | `).join('') + '
Click "Load" to generate a data preview and extract column headers.
+Strategic Command, Personnel, & Operations
+Tracking {{.TotalCount}} vulnerable assets across the ranch.
+| Asset Identifier | +Total Active | +Critical | +High | +Medium | +Low | +
|---|---|---|---|---|---|
| + + {{.AssetIdentifier}} + + | +{{.TotalActive}} | +{{.Critical}} | +{{.High}} | +{{.Medium}} | +{{.Low}} | +
| + No vulnerable assets found. The ranch is secure! + | +|||||
| Name | Role | Actions |
|---|---|---|
| {{.FullName}} {{.Email}} |
+ {{.GlobalRole}} | ++ + + + | +
Automate ticket assignment and triage based on asset tags or CVEs.
+ Learn about RiskRancher Pro → +Locked to Standard FedRAMP/NIST Default Timeframes
+| Severity | Triage | Patch |
|---|---|---|
| Critical | 1 | 3 |
| High | 3 | 14 |
| Medium | 7 | 30 |
| Low | 14 | 90 |
Real-time tamper-evident system audit log.
+{{.Analytics.ActiveKEVs}}
+ + +{{.Analytics.OpenCriticals}}
+ + +{{.Analytics.TotalOverdue}}
+ +{{.Analytics.GlobalMTTRDays}}
+No vulnerable assets found.
+ {{end}} +Diagnostic breakdown of global KPIs, bottlenecks, and SLA tracking by scanner source.
+| Integration | +๐ฅ High Risk Drivers (Ties to KPI Metrics) |
+ ๐ Analyst Backlog (Triage Phase) |
+ โณ IT Bottlenecks (Patch Phase) |
+ ๐ก๏ธ Resolution Hygiene (Closed Profile) |
+ Strategic Insight | +
|---|---|---|---|---|---|
|
+ {{.Source}} + {{.TotalOpen}} Total Open + |
+
+
+
+ {{if gt .Criticals 0}}{{.Criticals}} CRIT{{end}}
+ {{if gt .CisaKEVs 0}}{{.CisaKEVs}} KEV{{end}}
+ {{if and (eq .Criticals 0) (eq .CisaKEVs 0)}}-{{end}}
+
+ |
+
+ + {{if gt .Untriaged 0}} + {{.Untriaged}} Pending + {{else}} + โ Clear + {{end}} + | + ++ {{if gt .PatchOverdue 0}} + {{.PatchOverdue}} Overdue + {{else if gt .PendingRisk 0}} + {{.PendingRisk}} Excepted + {{else}} + โ Met + {{end}} + | + +
+ {{.TotalClosed}} Closed
+
+ {{if gt .Patched 0}}โ {{.Patched}}{{end}}
+ {{if gt .RiskAccepted 0}}โ๏ธ {{.RiskAccepted}}{{end}}
+ {{if gt .FalsePositive 0}}๐ซ {{.FalsePositive}}{{end}}
+ {{if eq .TotalClosed 0}}-{{end}}
+
+ |
+
+
+ {{.StrategicNote}}
+Lead: {{.TopAssignee}}
+ |
+
| No active sources found. | |||||
Operational ledger of all API pushes, webhooks, and CSV uploads.
+| Timestamp | +Source | +Status | +Records Processed | +Diagnostics | +
|---|---|---|---|---|
| {{.CreatedAt}} | +{{.Source}} | ++ {{if eq .Status "Success"}} + โ Success + {{else}} + โ Failed + {{end}} + | +{{.RecordsProcessed}} | +{{.ErrorMessage}} | +
| No syncs recorded yet. | ||||
{{.CurrentAsset}}| + | Severity | +Source | +Finding | +IT Assignee | +{{if eq .CurrentTab "holding_pen"}}โณ Time to Triage{{else}}SLA Status{{end}} | + {{if ne .CurrentTab "holding_pen"}}Action | {{end}} +|
|---|---|---|---|---|---|---|---|
| + | {{.Severity}} | +{{.Source}} | +
+
+
+
+
+
+
+
+
+
+ {{.Title}}
+
+ {{if eq .Status "Returned to Security"}}
+
+ ๐ Returned by IT: {{.LatestComment}}
+
+ {{end}}
+ |
+ + {{if eq .Assignee "Unassigned"}} + Unassigned + {{else}} + {{.Assignee}} + {{end}} + | ++ {{if eq $.CurrentTab "holding_pen"}} + + {{else}} + {{if .IsOverdue}}{{.SLAString}}{{else}}{{.SLAString}}{{end}} + {{end}} + | + {{if eq $.CurrentTab "holding_pen"}} + {{else if eq $.CurrentTab "chute"}} ++ {{else if eq $.CurrentTab "archives"}} + |
+ {{if eq .Status "Patched"}}
+ โ
RESOLVED
+ {{else if eq .Status "False Positive"}}
+ ๐ป IGNORED
+ {{end}}
+
+ Archived {{.UpdatedAt.Format "Jan 02"}} (Took {{.SLAString}})
+
+ |
+ {{end}}
+
Bring your findings into the ranch.
+ +Upload a Word (DOCX) or PDF penetration test report. We'll extract the findings and map them to tickets.
+ Upload Report +Using a proprietary scanner? Build a visual JSON mapping to seamlessly ingest its outputs.
+ Build New Adapter +Sign in to your SOC Dashboard
+ + + + + ++ Welcome to RiskRancher. The first user to register will automatically be granted the Sheriff (Global Admin) role. +
+ + + + + +Highlight text in the DOCX viewer to extract vulnerabilities.
+Upload a .docx manual assessment to enter the clipping parser.
+ +or click to browse your computer
+ +