repeater/internal/logging/logging.go
Pierre-Olivier Mercier 07f8673f2f Harden API surface and station/wifi backends
Bind to localhost by default and stop echoing backend errors (which can
embed credentials or low-level details) back over the API and log
broadcast. Validate hotspot SSID/passphrase/channel before writing
hostapd.conf and tighten its mode to 0600 since it stores the WPA PSK.
Restrict WebSocket upgrades to same-origin so a LAN browser can't be
turned into a proxy for the API.

Guard shared state: status reads/writes go through StatusMutex (the
periodic updater races with the toggle and status handlers otherwise),
broadcastToWebSockets no longer mutates the client map under RLock, and
station-event callbacks now run under SafeGo so a panic in app code can't
take down the daemon. Stop channels in hostapd, dhcp, and iwd signal
monitors are now closed under sync.Once to survive concurrent Stop calls.

App.Shutdown is idempotent and waits for the periodic loops before
closing backends, so signal-driven and deferred shutdowns no longer race.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-01 21:56:50 +08:00

109 lines
2.5 KiB
Go

package logging
import (
"log"
"sync"
"time"
"github.com/gorilla/websocket"
"github.com/nemunaire/repeater/internal/models"
)
var (
	// logEntries is the in-memory log buffer; AddLog bounds it to the
	// 100 most recent entries. Guarded by logMutex.
	logEntries []models.LogEntry
	logMutex   sync.RWMutex

	// websocketClients is the set of connected WebSocket clients that
	// receive log broadcasts. Guarded by clientsMutex.
	websocketClients = make(map[*websocket.Conn]bool)
	clientsMutex     sync.RWMutex
)
// AddLog adds a new log entry
func AddLog(source, message string) {
logMutex.Lock()
entry := models.LogEntry{
Timestamp: time.Now(),
Source: source,
Message: message,
}
logEntries = append(logEntries, entry)
// Keep only the last 100 logs
if len(logEntries) > 100 {
logEntries = logEntries[len(logEntries)-100:]
}
logMutex.Unlock()
// Broadcast to WebSocket clients
broadcastToWebSockets(entry)
// Log to console
log.Printf("[%s] %s", source, message)
}
// GetLogs returns a snapshot copy of the current log entries, so the
// caller can iterate it without holding any lock.
func GetLogs() []models.LogEntry {
	logMutex.RLock()
	defer logMutex.RUnlock()
	return append([]models.LogEntry(nil), logEntries...)
}
// ClearLogs empties the in-memory log buffer, then records (and
// broadcasts) an entry noting that the logs were cleared.
func ClearLogs() {
	logMutex.Lock()
	logEntries = nil
	logMutex.Unlock()

	AddLog("Système", "Logs effacés")
}
// RegisterWebSocketClient adds a WebSocket client to the broadcast set
// and replays the buffered log history to it.
//
// History replay stops at the first WriteJSON error: once a write fails
// the connection is broken, and continuing would only issue further
// doomed writes while holding logMutex. The dead connection is pruned
// later, either by broadcastToWebSockets on the next broadcast or by
// an explicit UnregisterWebSocketClient from the caller.
func RegisterWebSocketClient(conn *websocket.Conn) {
	clientsMutex.Lock()
	websocketClients[conn] = true
	clientsMutex.Unlock()

	// Send existing logs to the new client.
	logMutex.RLock()
	defer logMutex.RUnlock()
	for _, entry := range logEntries {
		if err := conn.WriteJSON(entry); err != nil {
			return
		}
	}
}
// UnregisterWebSocketClient drops a WebSocket client from the
// broadcast set; it is safe to call for a client that was never
// registered or was already removed.
func UnregisterWebSocketClient(conn *websocket.Conn) {
	clientsMutex.Lock()
	defer clientsMutex.Unlock()
	delete(websocketClients, conn)
}
// broadcastToWebSockets sends a log entry to every connected WebSocket
// client. Clients whose write fails are closed and collected, then
// removed from the map under the write lock — mutating the map while
// only holding RLock would race with concurrent Register/Unregister
// and panic Go's map runtime.
func broadcastToWebSockets(entry models.LogEntry) {
	// Snapshot the client set under the read lock so the (potentially
	// slow) writes happen without any lock held.
	clientsMutex.RLock()
	snapshot := make([]*websocket.Conn, 0, len(websocketClients))
	for c := range websocketClients {
		snapshot = append(snapshot, c)
	}
	clientsMutex.RUnlock()

	// Write to each client, collecting the ones whose write failed.
	var failed []*websocket.Conn
	for _, c := range snapshot {
		if err := c.WriteJSON(entry); err != nil {
			c.Close()
			failed = append(failed, c)
		}
	}
	if len(failed) == 0 {
		return
	}

	// Prune the dead clients under the write lock.
	clientsMutex.Lock()
	for _, c := range failed {
		delete(websocketClients, c)
	}
	clientsMutex.Unlock()
}