primal a998168c59 Add dashboard.go - stats types and calculation
Migrated from app/dashboard.go:
- Dashboard struct with DB connection and stats caching
- DashboardStats, TLDStat, RecentFeed, DomainStat types
- Stats calculation methods (collectDomainStats, collectFeedStats)
- Background stats update loop

Note: Runtime rates (domains/min, etc.) not available in standalone
dashboard - these are crawler-specific metrics.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-02 12:42:17 -05:00
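A minimal sketch of how a caller might wire the standalone dashboard together (assumes net/http and encoding/json imports; runDashboard, the /stats route, and the port are illustrative, not part of this commit):

func runDashboard(connString string) error {
	d, err := NewDashboard(connString)
	if err != nil {
		return err
	}
	defer d.Close()

	d.UpdateStats()       // populate the cache once before serving
	go d.StartStatsLoop() // then refresh every minute in the background

	http.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) {
		stats, _ := d.GetDashboardStats() // never blocks; may be empty right after startup
		json.NewEncoder(w).Encode(stats)
	})
	return http.ListenAndServe(":8080", nil)
}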


package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/1440news/shared"
)

// Dashboard is the main dashboard service
type Dashboard struct {
	db *shared.DB

	// statsMu guards cachedStats and cachedAllDomains, which are written by
	// UpdateStats and read by GetDashboardStats / GetCachedAllDomains
	statsMu          sync.RWMutex
	cachedStats      *DashboardStats
	cachedAllDomains []DomainStat

	startTime time.Time
}

// NewDashboard creates a new dashboard instance
func NewDashboard(connString string) (*Dashboard, error) {
	db, err := shared.OpenDatabase(connString)
	if err != nil {
		return nil, err
	}
	return &Dashboard{
		db:        db,
		startTime: time.Now(),
	}, nil
}

// Close closes the database connection
func (d *Dashboard) Close() error {
	return d.db.Close()
}

// DashboardStats holds all statistics for the dashboard
type DashboardStats struct {
	// Domain stats
	TotalDomains int `json:"total_domains"`
	HoldDomains  int `json:"hold_domains"`
	PassDomains  int `json:"pass_domains"`
	SkipDomains  int `json:"skip_domains"`
	DeadDomains  int `json:"dead_domains"`

	// Feed stats
	TotalFeeds   int `json:"total_feeds"`
	AliveFeeds   int `json:"alive_feeds"`   // status='pass' (healthy feeds)
	PublishFeeds int `json:"publish_feeds"` // publish_status='pass' (approved for publishing)
	SkipFeeds    int `json:"skip_feeds"`
	HoldFeeds    int `json:"hold_feeds"`
	DeadFeeds    int `json:"dead_feeds"`
	EmptyFeeds   int `json:"empty_feeds"`
	RSSFeeds     int `json:"rss_feeds"`
	AtomFeeds    int `json:"atom_feeds"`
	JSONFeeds    int `json:"json_feeds"`
	UnknownFeeds int `json:"unknown_feeds"`

	// Processing rates (per minute) - populated by crawler API if available
	DomainsCrawled  int32 `json:"domains_crawled"`
	DomainCheckRate int   `json:"domain_check_rate"`
	FeedCrawlRate   int   `json:"feed_crawl_rate"`
	FeedCheckRate   int   `json:"feed_check_rate"`

	// Timing
	UpdatedAt time.Time `json:"updated_at"`
}

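// When encoded with encoding/json, DashboardStats emits one key per tag above;
// a trimmed, illustrative example (values are made up):
//
//	{"total_domains": 52310, "pass_domains": 18044, "total_feeds": 96780,
//	 "rss_feeds": 61021, "updated_at": "2026-02-02T12:42:17-05:00"}
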
// TLDStat holds a per-TLD count
type TLDStat struct {
	TLD   string `json:"tld"`
	Count int    `json:"count"`
}

// RecentFeed describes a recently discovered feed
type RecentFeed struct {
	URL          string    `json:"url"`
	Title        string    `json:"title"`
	Type         string    `json:"type"`
	DiscoveredAt time.Time `json:"discovered_at"`
}

// DomainStat counts the feeds found for a single host
type DomainStat struct {
	Host       string `json:"host"`
	FeedsFound int    `json:"feeds_found"`
}

// commaFormat formats an integer with comma separators
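// e.g. commaFormat(1234567) returns "1,234,567"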
func commaFormat(n int) string {
	s := fmt.Sprintf("%d", n)
	if len(s) <= 3 {
		return s
	}
	var result []byte
	for i, c := range s {
		if i > 0 && (len(s)-i)%3 == 0 {
			result = append(result, ',')
		}
		result = append(result, byte(c))
	}
	return string(result)
}

// UpdateStats recalculates and caches dashboard statistics
func (d *Dashboard) UpdateStats() {
	fmt.Println("UpdateStats: calculating stats...")
	stats, err := d.calculateStats()
	if err != nil {
		fmt.Printf("UpdateStats: error calculating stats: %v\n", err)
		return
	}

	// Cache all domains with feeds (runs in background, so slow query is OK)
	fmt.Println("UpdateStats: fetching all domains...")
	allDomains := d.fetchAllDomainsFromDB()
	fmt.Printf("UpdateStats: got %d domains\n", len(allDomains))

	d.statsMu.Lock()
	d.cachedStats = stats
	d.cachedAllDomains = allDomains
	d.statsMu.Unlock()

	fmt.Println("UpdateStats: complete")
}

// fetchAllDomainsFromDB lists every domain that has feeds, with its feed count
func (d *Dashboard) fetchAllDomainsFromDB() []DomainStat {
	rows, err := d.db.Query(`
		SELECT domain_tld as tld, domain_host as domain_host, COUNT(*) as cnt FROM feeds
		GROUP BY domain_tld, domain_host
		ORDER BY domain_tld, domain_host
	`)
	if err != nil {
		fmt.Printf("fetchAllDomainsFromDB error: %v\n", err)
		return nil
	}
	defer rows.Close()

	var domains []DomainStat
	for rows.Next() {
		var ds DomainStat
		var tld string // scanned to match the SELECT list; not stored on DomainStat
		if err := rows.Scan(&tld, &ds.Host, &ds.FeedsFound); err != nil {
			continue
		}
		domains = append(domains, ds)
	}
	if err := rows.Err(); err != nil {
		fmt.Printf("fetchAllDomainsFromDB iteration error: %v\n", err)
	}
	return domains
}

// GetDashboardStats returns cached statistics (returns empty stats if not yet cached)
func (d *Dashboard) GetDashboardStats() (*DashboardStats, error) {
	d.statsMu.RLock()
	stats := d.cachedStats
	d.statsMu.RUnlock()
	if stats != nil {
		return stats, nil
	}
	// Return empty stats while background calculation runs (don't block HTTP requests)
	return &DashboardStats{UpdatedAt: time.Now()}, nil
}

// GetCachedAllDomains returns the cached list of all domains
func (d *Dashboard) GetCachedAllDomains() []DomainStat {
	d.statsMu.RLock()
	defer d.statsMu.RUnlock()
	return d.cachedAllDomains
}

// calculateStats collects all statistics for the dashboard
func (d *Dashboard) calculateStats() (*DashboardStats, error) {
	stats := &DashboardStats{
		UpdatedAt: time.Now(),
		// Runtime rates not available in standalone dashboard
		// TODO: fetch from crawler API if needed
	}

	// Get domain stats
	if err := d.collectDomainStats(stats); err != nil {
		return nil, err
	}

	// Get feed stats
	if err := d.collectFeedStats(stats); err != nil {
		return nil, err
	}
	return stats, nil
}

// collectDomainStats fills in the domain counters on stats
func (d *Dashboard) collectDomainStats(stats *DashboardStats) error {
	// Use COUNT(*) for total count
	err := d.db.QueryRow("SELECT COUNT(*) FROM domains").Scan(&stats.TotalDomains)
	if err != nil {
		return err
	}

	// Single query to get all status counts (one index scan instead of three)
	rows, err := d.db.Query("SELECT status, COUNT(*) FROM domains GROUP BY status")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var status string
		var count int
		if err := rows.Scan(&status, &count); err != nil {
			continue
		}
		switch status {
		case "hold":
			stats.HoldDomains = count
		case "pass":
			stats.PassDomains = count
		case "skip":
			stats.SkipDomains = count
		case "dead":
			stats.DeadDomains = count
		}
	}
	return rows.Err()
}

// collectFeedStats fills in the feed counters on stats
func (d *Dashboard) collectFeedStats(stats *DashboardStats) error {
	// Use COUNT(*) for total count
	err := d.db.QueryRow("SELECT COUNT(*) FROM feeds").Scan(&stats.TotalFeeds)
	if err != nil {
		return err
	}

	// Get status counts
	statusRows, err := d.db.Query("SELECT status, COUNT(*) FROM feeds GROUP BY status")
	if err != nil {
		return err
	}
	defer statusRows.Close()
	for statusRows.Next() {
		var status *string
		var count int
		if err := statusRows.Scan(&status, &count); err != nil {
			continue
		}
		if status != nil {
			switch *status {
			case "pass":
				stats.AliveFeeds = count
			case "skip":
				stats.SkipFeeds = count
			case "hold":
				stats.HoldFeeds = count
			case "dead":
				stats.DeadFeeds = count
			}
		}
	}
	if err := statusRows.Err(); err != nil {
		return err
	}

	// Count feeds approved for publishing (publish_status='pass').
	// Errors are ignored here; on failure the counter simply stays zero.
	d.db.QueryRow("SELECT COUNT(*) FROM feeds WHERE publish_status = 'pass'").Scan(&stats.PublishFeeds)

	// Count empty feeds (item_count = 0 or NULL)
	d.db.QueryRow("SELECT COUNT(*) FROM feeds WHERE item_count IS NULL OR item_count = 0").Scan(&stats.EmptyFeeds)

	// Single query to get all type counts (one index scan instead of three)
	rows, err := d.db.Query("SELECT type, COUNT(*) FROM feeds GROUP BY type")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var feedType *string
		var count int
		if err := rows.Scan(&feedType, &count); err != nil {
			continue
		}
		if feedType == nil {
			stats.UnknownFeeds += count
		} else {
			switch *feedType {
			case "rss":
				stats.RSSFeeds = count
			case "atom":
				stats.AtomFeeds = count
			case "json":
				stats.JSONFeeds = count
			default:
				stats.UnknownFeeds += count
			}
		}
	}
	return rows.Err()
}

// StartStatsLoop updates stats every minute; it blocks, so callers typically
// run it in its own goroutine (go d.StartStatsLoop())
func (d *Dashboard) StartStatsLoop() {
	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		d.UpdateStats()
	}
}