package manager

import (
	"context"
	"errors"
	"fmt"
	"os"
	"time"

	"gitlab.torproject.org/rgdd/ct/pkg/metadata"

	"rgdd.se/silent-ct/internal/feedback"
	"rgdd.se/silent-ct/internal/logger"
	"rgdd.se/silent-ct/internal/monitor"
	"rgdd.se/silent-ct/pkg/policy"
	"rgdd.se/silent-ct/pkg/storage"
)

type Config struct {
	Policy    policy.Policy
	Bootstrap bool   // Whether a new storage should be initialized from scratch
	Directory string // Path to a directory where everything will be stored

	// Optional
	Logger                  *logger.Logger // Where to output messages and with what verbosity
	AlertDelay              time.Duration  // Time before alerting on certificates that are unaccounted for
	MetadataRefreshInterval time.Duration  // How often to update the list of monitored logs
	ShutdownTimeout         time.Duration  // Force shutdown after this timeout (FIXME: should not be needed)
}

type Manager struct {
	Config
	storage.Storage

	feventCh  chan []feedback.Event
	meventCh  chan monitor.Event
	mconfigCh chan monitor.MonitoredLog
	errorCh   chan error
}

func New(cfg Config, fch chan []feedback.Event, mch chan monitor.Event, cch chan monitor.MonitoredLog, ech chan error) (Manager, error) {
	if !cfg.Logger.IsConfigured() {
		cfg.Logger = logger.New(logger.Config{Level: logger.LevelNotice, File: os.Stdout})
	}
	if cfg.MetadataRefreshInterval == 0 {
		cfg.MetadataRefreshInterval = 1 * time.Hour
	}
	if cfg.ShutdownTimeout == 0 {
		cfg.ShutdownTimeout = 10 * time.Second
	}

	s, err := storage.New(storage.Config{
		Bootstrap:  cfg.Bootstrap,
		Directory:  cfg.Directory,
		Logger:     cfg.Logger,
		AlertDelay: cfg.AlertDelay,
		StaticLogs: cfg.Policy.StaticLogs,
		RemoveLogs: cfg.Policy.RemoveLogs,
	})
	if err != nil {
		return Manager{}, err
	}

	for _, log := range s.LogList.Generate() {
		state, err := s.BootstrapLog(context.Background(), log, cfg.Bootstrap)
		if errors.Is(err, storage.ErrorMonitorStateExists) {
			continue
		}
		if err != nil {
			return Manager{}, err
		}
		cfg.Logger.Infof("bootstrapping log %s at next index %d\n", log.URL, state.NextIndex)
	}

	return Manager{Config: cfg, Storage: s, feventCh: fch, meventCh: mch, mconfigCh: cch, errorCh: ech}, nil
}

func (mgr *Manager) Run(ctx context.Context) error {
	if err := mgr.startupConfig(); err != nil {
		return fmt.Errorf("unable to do startup config: %v", err)
	}

	metadataTicker := time.NewTicker(mgr.MetadataRefreshInterval)
	defer metadataTicker.Stop()

	shutdown := false
	for {
		select {
		case <-metadataTicker.C:
			if err := mgr.metadataJob(ctx); err != nil {
				return fmt.Errorf("unable to run metadata job: %v", err)
			}
		case ev := <-mgr.meventCh:
			if err := mgr.monitorJob(ev); err != nil {
				return fmt.Errorf("unable to run monitor job: %v", err)
			}
			if err := mgr.alertJob(); err != nil {
				return fmt.Errorf("unable to run alert job: %v", err)
			}
		case ev := <-mgr.feventCh:
			if err := mgr.feedbackJob(ev); err != nil {
				return fmt.Errorf("unable to run server job: %v", err)
			}
			if err := mgr.alertJob(); err != nil {
				return fmt.Errorf("unable to run alert job: %v", err)
			}
		case err := <-mgr.errorCh:
			if err := mgr.errorJob(err); err != nil {
				return fmt.Errorf("unable to run error job: %v", err)
			}
		case <-ctx.Done():
			if !shutdown {
				shutdown = true
				mgr.Logger.Debugf("shutdown scheduled in %v\n", mgr.ShutdownTimeout)

				// defer shutdown so that all channels can be drained
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(context.Background(), mgr.ShutdownTimeout)
				defer cancel()
				continue
			}

			mgr.Logger.Debugf("manager shutdown\n")
			os.Exit(0) // FIXME: return nil without hanging, unpredictable gh.com/google/ct-go fetcher shutdown?
		}
	}
}

func (mgr *Manager) startupConfig() error {
	mgr.Logger.Debugf("startup configuration contains %d logs\n", len(mgr.Storage.LogList.Generate()))
	for _, log := range mgr.Storage.LogList.Generate() {
		state, err := mgr.GetMonitorState(log)
		if err != nil {
			return err
		}
		mgr.mconfigCh <- monitor.MonitoredLog{Config: log, State: state}
	}
	return nil
}

func (mgr *Manager) metadataJob(ctx context.Context) error {
	mgr.Logger.Debugf("running metadata job\n")
	added, removed, err := mgr.LogList.Update(ctx)
	if err != nil {
		if mgr.LogList.IsStale() {
			return fmt.Errorf("unable to update log list which is now stale: %v", err)
		}
	}
	mgr.removeLogs(removed)
	mgr.addLogs(ctx, added)
	return nil
}

func (mgr *Manager) removeLogs(logs []metadata.Log) {
	mgr.Logger.Debugf("removing %d logs\n", len(logs))
	for _, log := range logs {
		state, _ := mgr.GetMonitorState(log)
		mgr.Logger.Infof("removing log %s with %d entries in its backlog\n", log.URL, state.TreeSize-state.NextIndex)
		mgr.mconfigCh <- monitor.MonitoredLog{Config: log}
	}
}

func (mgr *Manager) addLogs(ctx context.Context, logs []metadata.Log) {
	mgr.Logger.Debugf("adding %d logs\n", len(logs))
	for _, log := range logs {
		state, err := mgr.BootstrapLog(ctx, log, false)
		if errors.Is(err, storage.ErrorMonitorStateExists) {
			mgr.Logger.Infof("adding log %s with existing state on disk\n", log.URL)
		} else if err != nil {
			mgr.Logger.Noticef("restart required: failed to bootstrap new log %s: %v\n", log.URL, err)
		} else {
			mgr.Logger.Infof("bootstrapping log %s at next index 0\n", log.URL)
		}
		mgr.mconfigCh <- monitor.MonitoredLog{Config: log, State: state}
	}
}

func (mgr *Manager) feedbackJob(events []feedback.Event) error {
	mgr.Logger.Debugf("received feedback with %d events\n", len(events))
	for _, ev := range events {
		if err := mgr.AddChain(ev.NodeName, ev.PEMChain); err != nil {
			return err
		}
	}
	return nil
}

func (mgr *Manager) monitorJob(msg monitor.Event) error {
	mgr.Logger.Debugf("new state for %s\n", msg.Summary())
	if err := mgr.AddEntries(msg.State.LogID, msg.Matches); err != nil {
		return err
	}
	return mgr.SetMonitorState(msg.State.LogID, msg.State)
}

func (mgr *Manager) alertJob() error {
	alerts, err := mgr.Index.TriggerAlerts()
	if err != nil {
		return err
	}
	for _, alert := range alerts {
		mgr.Logger.Noticef("certificate mis-issuance? No node submitted certificate %s\n", alert.StoredAt)
	}
	return nil
}

func (mgr *Manager) errorJob(err error) error {
	mgr.Logger.Debugf("received error: %v\n", err)
	return nil
}
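
// runExample is a minimal usage sketch and is not part of the original
// package: it assumes a caller that has already loaded a policy.Policy (p)
// and chosen a storage directory (dir). It only illustrates how the four
// channels that New expects are created and how Run is driven by a
// cancellable context; real wiring is expected to live in the caller.
func runExample(ctx context.Context, p policy.Policy, dir string) error {
	fch := make(chan []feedback.Event)     // feedback events from nodes (submitted certificate chains)
	mch := make(chan monitor.Event)        // new state and matching entries reported by the monitor
	cch := make(chan monitor.MonitoredLog) // monitored-log configurations pushed out by the manager
	ech := make(chan error)                // errors reported by other components

	mgr, err := New(Config{Policy: p, Bootstrap: true, Directory: dir}, fch, mch, cch, ech)
	if err != nil {
		return err
	}

	// Note: something must consume cch (e.g. the monitor), otherwise the
	// sends in startupConfig block. Run loops until ctx is cancelled; as the
	// FIXME above notes, it currently exits the process instead of returning.
	return mgr.Run(ctx)
}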