Publishing:
- Add publisher.go for posting feed items to AT Protocol PDS
- Support deterministic rkeys from SHA256(guid + discoveredAt) (see sketch below)
- Handle multiple URLs in posts with facets for each link
- Image embed support (app.bsky.embed.images) for up to 4 images
- External embed with thumbnail fallback
- Podcast/audio enclosure URLs included in post text

Media extraction:
- Parse RSS enclosures (audio, video, images)
- Extract Media RSS content and thumbnails
- Extract images from HTML content in descriptions
- Store enclosure and imageUrls in items table

SQLite stability improvements:
- Add synchronous=NORMAL and wal_autocheckpoint pragmas
- Connection pool tuning (idle conns, max lifetime)
- Periodic WAL checkpoint every 5 minutes
- Hourly integrity checks with PRAGMA quick_check
- Daily hot backup via VACUUM INTO
- Docker stop_grace_period: 30s for graceful shutdown

Dashboard:
- Feed publishing UI and API endpoints
- Account creation with invite codes

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
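
The deterministic rkeys mentioned under Publishing are implemented in publisher.go, which is not shown on this page. Below is a minimal, self-contained sketch of how such a key could be derived from SHA256(guid + discoveredAt); the function name, timestamp format, and truncation length are assumptions rather than the actual code.

// Hypothetical sketch only; not part of the crawler file below or of publisher.go.
// Hashing the GUID together with the discovery time yields a stable record key,
// so re-publishing the same item always targets the same record.
package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "time"
)

func deterministicRKey(guid string, discoveredAt time.Time) string {
    sum := sha256.Sum256([]byte(guid + discoveredAt.UTC().Format(time.RFC3339)))
    // Hex digits are all legal rkey characters; truncate to keep the key short (assumed length).
    return hex.EncodeToString(sum[:])[:16]
}

func main() {
    fmt.Println(deterministicRKey("https://example.com/item/1", time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)))
}
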
336 lines · 7.9 KiB · Go

package main

import (
    "database/sql"
    "fmt"
    "io"
    "net/http"
    "os"
    "runtime"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "golang.org/x/net/html"
)

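// Crawler discovers feed URLs by crawling domains and periodically re-checks
// known feeds, persisting results in SQLite. Worker goroutines update the
// atomic counters; cached dashboard stats are guarded by statsMu.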
type Crawler struct {
    MaxDepth           int
    MaxPagesPerHost    int
    Timeout            time.Duration
    UserAgent          string
    visited            sync.Map
    feedsMu            sync.Mutex
    client             *http.Client
    hostsProcessed     int32
    feedsChecked       int32
    startTime          time.Time
    db                 *sql.DB
    displayedCrawlRate int
    displayedCheckRate int
    domainsImported    int32
    cachedStats        *DashboardStats
    cachedAllDomains   []DomainStat
    statsMu            sync.RWMutex
}

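// NewCrawler opens the SQLite database at dbPath and returns a Crawler with
// default depth, page, and timeout limits and a shared HTTP client that stops
// after 10 redirects.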
func NewCrawler(dbPath string) (*Crawler, error) {
    db, err := OpenDatabase(dbPath)
    if err != nil {
        return nil, fmt.Errorf("failed to open database: %v", err)
    }

    return &Crawler{
        MaxDepth:        10,
        MaxPagesPerHost: 10,
        Timeout:         10 * time.Second,
        UserAgent:       "FeedCrawler/1.0",
        startTime:       time.Now(),
        db:              db,
        client: &http.Client{
            Timeout: 10 * time.Second,
            CheckRedirect: func(req *http.Request, via []*http.Request) error {
                if len(via) >= 10 {
                    return fmt.Errorf("stopped after 10 redirects")
                }
                return nil
            },
        },
    }, nil
}

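// Close checkpoints the WAL into the main database file and then closes the
// database connection.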
func (c *Crawler) Close() error {
    if c.db != nil {
        // Checkpoint WAL to merge it back into main database before closing
        // This prevents corruption if the container is stopped mid-write
        fmt.Println("Checkpointing WAL...")
        if _, err := c.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)"); err != nil {
            fmt.Printf("WAL checkpoint warning: %v\n", err)
        }
        fmt.Println("Closing database...")
        return c.db.Close()
    }
    return nil
}

// StartStatsLoop updates cached stats once per minute
func (c *Crawler) StartStatsLoop() {
    for {
        c.UpdateStats()
        time.Sleep(1 * time.Minute)
    }
}

// StartCleanupLoop runs item cleanup once per week
func (c *Crawler) StartCleanupLoop() {
    for {
        deleted, err := c.CleanupOldItems()
        if err != nil {
            fmt.Printf("Cleanup error: %v\n", err)
        } else if deleted > 0 {
            fmt.Printf("Cleanup: removed %d old items\n", deleted)
        }
        time.Sleep(7 * 24 * time.Hour)
    }
}

// StartMaintenanceLoop performs periodic database maintenance:
// - WAL checkpoint every 5 minutes to prevent WAL bloat and reduce corruption risk
// - Quick integrity check every hour to detect issues early
// - Hot backup every 24 hours for recovery
func (c *Crawler) StartMaintenanceLoop() {
    checkpointTicker := time.NewTicker(5 * time.Minute)
    integrityTicker := time.NewTicker(1 * time.Hour)
    backupTicker := time.NewTicker(24 * time.Hour)
    defer checkpointTicker.Stop()
    defer integrityTicker.Stop()
    defer backupTicker.Stop()

    for {
        select {
        case <-checkpointTicker.C:
            // Passive checkpoint - doesn't block writers
            if _, err := c.db.Exec("PRAGMA wal_checkpoint(PASSIVE)"); err != nil {
                fmt.Printf("WAL checkpoint error: %v\n", err)
            }

        case <-integrityTicker.C:
            // Quick check is faster than full integrity_check
            var result string
            if err := c.db.QueryRow("PRAGMA quick_check").Scan(&result); err != nil {
                fmt.Printf("Integrity check error: %v\n", err)
            } else if result != "ok" {
                fmt.Printf("WARNING: Database integrity issue detected: %s\n", result)
            }

        case <-backupTicker.C:
            c.createBackup()
        }
    }
}

// createBackup creates a hot backup of the database via SQLite's VACUUM INTO
func (c *Crawler) createBackup() {
    backupPath := "feeds/feeds.db.backup"
    fmt.Println("Creating database backup...")

    // VACUUM INTO refuses to write over an existing file, so remove the
    // previous day's backup first (ignore the error if it doesn't exist yet)
    if err := os.Remove(backupPath); err != nil && !os.IsNotExist(err) {
        fmt.Printf("Backup error: could not remove old backup: %v\n", err)
        return
    }

    // Use SQLite's online backup via VACUUM INTO (available in SQLite 3.27+)
    // This creates a consistent snapshot without blocking writers
    if _, err := c.db.Exec("VACUUM INTO ?", backupPath); err != nil {
        fmt.Printf("Backup error: %v\n", err)
        return
    }

    fmt.Printf("Backup created: %s\n", backupPath)
}

// StartCrawlLoop runs the domain crawling loop independently
func (c *Crawler) StartCrawlLoop() {
    numWorkers := runtime.NumCPU()
    if numWorkers < 1 {
        numWorkers = 1
    }

    // Buffered channel for domain work
    workChan := make(chan *Domain, 256)

    // Start workers
    for i := 0; i < numWorkers; i++ {
        go func() {
            for domain := range workChan {
                feedsFound, crawlErr := c.crawlHost(domain.Host)
                errStr := ""
                if crawlErr != nil {
                    errStr = crawlErr.Error()
                }
                if err := c.markDomainCrawled(domain.Host, feedsFound, errStr); err != nil {
                    fmt.Printf("Error marking domain %s as crawled: %v\n", domain.Host, err)
                }
            }
        }()
    }

    const fetchSize = 1000
    for {
        domains, err := c.GetUncheckedDomains(fetchSize)
        if err != nil {
            fmt.Printf("Error fetching domains: %v\n", err)
        }

        if len(domains) == 0 {
            c.displayedCrawlRate = 0
            time.Sleep(1 * time.Second)
            continue
        }

        fmt.Printf("%s crawl: %d domains to check\n", time.Now().Format("15:04:05"), len(domains))

        for _, domain := range domains {
            workChan <- domain
        }

        time.Sleep(1 * time.Second)
    }
}

// StartCheckLoop runs the feed checking loop independently
func (c *Crawler) StartCheckLoop() {
    numWorkers := runtime.NumCPU()
    if numWorkers < 1 {
        numWorkers = 1
    }

    // Buffered channel for feed work
    workChan := make(chan *Feed, 256)

    // Start workers
    for i := 0; i < numWorkers; i++ {
        go func() {
            for feed := range workChan {
                c.CheckFeed(feed)
            }
        }()
    }

    const fetchSize = 1000
    for {
        feeds, err := c.GetFeedsDueForCheck(fetchSize)
        if err != nil {
            fmt.Printf("Error fetching feeds: %v\n", err)
        }

        if len(feeds) == 0 {
            c.displayedCheckRate = 0
            time.Sleep(1 * time.Second)
            continue
        }

        fmt.Printf("%s check: %d feeds to check\n", time.Now().Format("15:04:05"), len(feeds))

        for _, feed := range feeds {
            workChan <- feed
        }

        time.Sleep(1 * time.Second)
    }
}

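// crawlHost crawls a single host starting from its root page, trying HTTPS
// first and falling back to HTTP, and returns the number of feeds recorded
// for that host.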
func (c *Crawler) crawlHost(host string) (feedsFound int, err error) {
    atomic.AddInt32(&c.hostsProcessed, 1)

    localVisited := make(map[string]bool)
    pagesVisited := 0

    // Try HTTPS first, fall back to HTTP if no pages were visited
    c.crawlPage("https://"+host, host, 0, localVisited, &pagesVisited)
    if pagesVisited == 0 {
        c.crawlPage("http://"+host, host, 0, localVisited, &pagesVisited)
    }

    // Count feeds found for this specific host
    feedsFound, _ = c.GetFeedCountByHost(host)

    if pagesVisited == 0 {
        return feedsFound, fmt.Errorf("could not connect")
    }

    return feedsFound, nil
}

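// crawlPage fetches a single page, records it as a feed if the response looks
// like feed content, and otherwise extracts feed links and follows links that
// shouldCrawl approves, up to MaxDepth and MaxPagesPerHost.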
func (c *Crawler) crawlPage(pageURL, sourceHost string, depth int, localVisited map[string]bool, pagesVisited *int) {
    if *pagesVisited >= c.MaxPagesPerHost || depth > c.MaxDepth {
        return
    }

    if localVisited[pageURL] {
        return
    }

    if _, visited := c.visited.LoadOrStore(pageURL, true); visited {
        return
    }

    localVisited[pageURL] = true
    *pagesVisited++

    body, contentType, headers, err := c.fetchPage(pageURL)
    if err != nil {
        return
    }

    if c.isFeedContent(body, contentType) {
        c.processFeed(pageURL, sourceHost, body, headers)
        return
    }

    doc, err := html.Parse(strings.NewReader(body))
    if err != nil {
        return
    }

    feedLinks := c.extractFeedLinks(doc, pageURL)
    for _, fl := range feedLinks {
        c.addFeed(fl.URL, fl.Type, sourceHost, pageURL)
    }

    anchorFeeds := c.extractAnchorFeeds(doc, pageURL)
    for _, fl := range anchorFeeds {
        c.addFeed(fl.URL, fl.Type, sourceHost, pageURL)
    }

    if depth < c.MaxDepth {
        links := c.extractLinks(doc, pageURL)
        for _, link := range links {
            if shouldCrawl(link, pageURL) {
                c.crawlPage(link, sourceHost, depth+1, localVisited, pagesVisited)
            }
        }
    }
}

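// fetchPage performs a GET request with the crawler's User-Agent and returns
// the body, Content-Type, and response headers for a 200 response.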
func (c *Crawler) fetchPage(pageURL string) (string, string, http.Header, error) {
    req, err := http.NewRequest("GET", pageURL, nil)
    if err != nil {
        return "", "", nil, err
    }
    req.Header.Set("User-Agent", c.UserAgent)

    resp, err := c.client.Do(req)
    if err != nil {
        return "", "", nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return "", "", nil, fmt.Errorf("status code: %d", resp.StatusCode)
    }

    bodyBytes, err := io.ReadAll(resp.Body)
    if err != nil {
        return "", "", nil, err
    }

    contentType := resp.Header.Get("Content-Type")
    return string(bodyBytes), contentType, resp.Header, nil
}