// Package manager ties together storage, log monitoring, feedback from
// nodes, and alerting for silentct.
package manager

import (
	"context"
	"errors"
	"fmt"
	"os"
	"time"

	"gitlab.torproject.org/rgdd/ct/pkg/metadata"

	"rgdd.se/silentct/internal/feedback"
	"rgdd.se/silentct/internal/logger"
	"rgdd.se/silentct/internal/metrics"
	"rgdd.se/silentct/internal/monitor"
	"rgdd.se/silentct/pkg/policy"
	"rgdd.se/silentct/pkg/storage"
)
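
// Config collects the options for creating a Manager.  The fields below the
// Optional marker may be left unset; New fills in defaults for Logger,
// MetadataRefreshInterval, and ShutdownTimeout.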
type Config struct {
	Policy    policy.Policy
	Bootstrap bool   // Whether a new storage should be initialized from scratch
	Directory string // Path to a directory where everything will be stored
	Metrics   *metrics.Metrics

	// Optional
	Logger                  *logger.Logger // Where to output messages and with what verbosity
	AlertDelay              time.Duration  // Time before alerting on certificates that are unaccounted for
	MetadataRefreshInterval time.Duration  // How often to update the list of monitored logs
	ShutdownTimeout         time.Duration  // Force shutdown after this timeout (FIXME: should not be needed)
}
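
// Manager wraps the configuration and persistent storage, and holds the
// channels used to exchange log configurations, monitor events, feedback
// events, and errors with the rest of the system.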
type Manager struct {
	Config
	storage.Storage

	feventCh  chan []feedback.Event
	meventCh  chan monitor.Event
	mconfigCh chan monitor.MonitoredLog
	errorCh   chan error
}
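
// New creates a Manager: it fills in defaults for unset optional fields,
// opens (or bootstraps) the storage in cfg.Directory, and bootstraps monitor
// state for any log in the policy that does not yet have state on disk.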
func New(cfg Config, fch chan []feedback.Event, mch chan monitor.Event, cch chan monitor.MonitoredLog, ech chan error) (Manager, error) {
	if !cfg.Logger.IsConfigured() {
		cfg.Logger = logger.New(logger.Config{Level: logger.LevelNotice, File: os.Stdout})
	}
	if cfg.MetadataRefreshInterval == 0 {
		cfg.MetadataRefreshInterval = 1 * time.Hour
	}
	if cfg.ShutdownTimeout == 0 {
		cfg.ShutdownTimeout = 10 * time.Second
	}

	s, err := storage.New(storage.Config{
		Bootstrap:  cfg.Bootstrap,
		Directory:  cfg.Directory,
		Logger:     cfg.Logger,
		AlertDelay: cfg.AlertDelay,
		StaticLogs: cfg.Policy.StaticLogs,
		RemoveLogs: cfg.Policy.RemoveLogs,
	})
	if err != nil {
		return Manager{}, err
	}

	for _, log := range s.LogList.Generate() {
		state, err := s.BootstrapLog(context.Background(), log, cfg.Bootstrap)
		if errors.Is(err, storage.ErrorMonitorStateExists) {
			continue
		}
		if err != nil {
			return Manager{}, err
		}
		cfg.Logger.Infof("bootstrapping log %s at next index %d\n", log.URL, state.NextIndex)
	}

	return Manager{Config: cfg, Storage: s, feventCh: fch, meventCh: mch, mconfigCh: cch, errorCh: ech}, nil
}
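
// Run pushes the initial log configurations to the monitor and then loops
// until ctx is done: it refreshes the log list and checks alerts on timers,
// and handles monitor, feedback, and error events as they arrive.  On
// cancellation it keeps draining the channels for ShutdownTimeout before
// exiting.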
func (mgr *Manager) Run(ctx context.Context) error {
	if err := mgr.startupConfig(); err != nil {
		return fmt.Errorf("unable to do startup config: %v", err)
	}

	metadataTicker := time.NewTicker(mgr.MetadataRefreshInterval)
	defer metadataTicker.Stop()
	alertTicker := time.NewTicker(mgr.AlertDelay)
	defer alertTicker.Stop()

	shutdown := false
	for {
		select {
		case <-metadataTicker.C:
			if err := mgr.metadataJob(ctx); err != nil {
				return fmt.Errorf("unable to run metadata job: %v", err)
			}
		case <-alertTicker.C:
			if err := mgr.alertJob(); err != nil {
				return fmt.Errorf("unable to run alert job: %v", err)
			}
		case ev := <-mgr.meventCh:
			if err := mgr.monitorJob(ev); err != nil {
				return fmt.Errorf("unable to run monitor job: %v", err)
			}
			if err := mgr.alertJob(); err != nil {
				return fmt.Errorf("unable to run alert job: %v", err)
			}
		case ev := <-mgr.feventCh:
			if err := mgr.feedbackJob(ev); err != nil {
				return fmt.Errorf("unable to run server job: %v", err)
			}
			if err := mgr.alertJob(); err != nil {
				return fmt.Errorf("unable to run alert job: %v", err)
			}
		case err := <-mgr.errorCh:
			if err := mgr.errorJob(err); err != nil {
				return fmt.Errorf("unable to run error job: %v", err)
			}
		case <-ctx.Done():
			if !shutdown {
				shutdown = true
				mgr.Logger.Debugf("shutdown scheduled in %v\n", mgr.ShutdownTimeout)
				// defer shutdown so that all channels can be drained
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(context.Background(), mgr.ShutdownTimeout)
				defer cancel()
				continue
			}
			mgr.Logger.Debugf("manager shutdown\n")
			os.Exit(0) // FIXME: return nil without hanging, unpredictable gh.com/google/ct-go fetcher shutdown?
		}
	}
}
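
// startupConfig sends the current monitor state of every log in the log
// list to the monitor, and records that state as metrics.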
func (mgr *Manager) startupConfig() error {
	mgr.Logger.Debugf("startup config contains %d logs\n", len(mgr.Storage.LogList.Generate()))
	for _, log := range mgr.Storage.LogList.Generate() {
		state, err := mgr.GetMonitorState(log)
		if err != nil {
			return err
		}
		mgr.mconfigCh <- monitor.MonitoredLog{Config: log, State: state}
		mgr.Metrics.LogState(state)
	}
	return nil
}
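
// metadataJob refreshes the list of monitored logs, forwarding any added or
// removed logs to the monitor.  A failed refresh only results in an error if
// the cached log list has become stale.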
func (mgr *Manager) metadataJob(ctx context.Context) error {
	mgr.Logger.Debugf("running metadata job\n")
	added, removed, err := mgr.LogList.Update(ctx)
	if err != nil {
		if mgr.LogList.IsStale() {
			return fmt.Errorf("unable to update log list which is now stale: %v", err)
		}
	}
	mgr.removeLogs(removed)
	mgr.addLogs(ctx, added)
	return nil
}
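
// removeLogs tells the monitor to stop monitoring the given logs and drops
// their state from the metrics.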
func (mgr *Manager) removeLogs(logs []metadata.Log) {
	mgr.Logger.Debugf("removing %d logs\n", len(logs))
	for _, log := range logs {
		state, _ := mgr.GetMonitorState(log)
		mgr.Logger.Infof("removing log %s with %d entries in its backlog\n", log.URL, state.TreeSize-state.NextIndex)
		mgr.mconfigCh <- monitor.MonitoredLog{Config: log}
		mgr.Metrics.RemoveLogState(state)
	}
}
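
// addLogs bootstraps monitor state for the given logs (unless state already
// exists on disk) and tells the monitor to start monitoring them.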
func (mgr *Manager) addLogs(ctx context.Context, logs []metadata.Log) {
	mgr.Logger.Debugf("adding %d logs\n", len(logs))
	for _, log := range logs {
		state, err := mgr.BootstrapLog(ctx, log, false)
		if errors.Is(err, storage.ErrorMonitorStateExists) {
			mgr.Logger.Infof("adding log %s with existing state on disk\n", log.URL)
		} else if err != nil {
			mgr.Logger.Noticef("restart required: failed to bootstrap new log %s: %v\n", log.URL, err)
		} else {
			mgr.Logger.Infof("bootstrapping log %s at next index 0\n", log.URL)
		}
		mgr.mconfigCh <- monitor.MonitoredLog{Config: log, State: state}
		mgr.Metrics.LogState(state)
	}
}
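
// feedbackJob adds the certificate chains reported by nodes to the storage.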
func (mgr *Manager) feedbackJob(events []feedback.Event) error {
	mgr.Logger.Debugf("received feedback with %d events\n", len(events))
	for _, ev := range events {
		if err := mgr.AddChain(ev.NodeName, ev.PEMChain); err != nil {
			return err
		}
	}
	return nil
}
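
// monitorJob persists the matching entries and the new monitor state
// received from the monitor, and updates the per-log metrics.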
func (mgr *Manager) monitorJob(msg monitor.Event) error {
	mgr.Logger.Debugf("new state for %s\n", msg.Summary())
	if err := mgr.AddEntries(msg.State.LogID, msg.Matches); err != nil {
		return err
	}
	if err := mgr.SetMonitorState(msg.State.LogID, msg.State); err != nil {
		return err
	}
	mgr.Metrics.LogState(msg.State)
	return nil
}
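
// alertJob logs a notice for every stored certificate that is still
// unaccounted for, and updates the certificate-alert metric.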
func (mgr *Manager) alertJob() error {
	alerts, err := mgr.Index.TriggerAlerts()
	if err != nil {
		return err
	}
	for _, alert := range alerts {
		mgr.Logger.Noticef("certificate mis-issuance? No allowlisting for %s\n", alert.StoredAt)
	}
	mgr.Metrics.CertificateAlert(mgr.Storage.Index.Alerting())
	return nil
}
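
// errorJob handles errors reported by other goroutines; for now they are
// only logged at debug level.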
func (mgr *Manager) errorJob(err error) error {
	mgr.Logger.Debugf("received error: %v\n", err)
	return nil
}