From e18d36ebae30536c77c61cd5da123991e0ca1629 Mon Sep 17 00:00:00 2001 From: Rasmus Dahlberg Date: Sun, 31 Dec 2023 09:39:25 +0100 Subject: Add drafty prototype --- README.md | 2 + cmd/silent-ctmoon/main.go | 217 ++++++++++++++++++++++++++++++ cmd/silent-ctnode/main.go | 127 ++++++++++++++++++ docs/http-api.md | 29 ---- docs/introduction.md | 32 +---- docs/state.md | 9 -- docs/storage.md | 3 + docs/submission.md | 22 ++++ go.mod | 2 +- go.sum | 2 + internal/feedback/feedback.go | 127 ++++++++++++++++++ internal/flagopt/flagopt.go | 26 ++++ internal/ioutil/ioutil.go | 56 ++++++++ internal/logger/logger.go | 96 ++++++++++++++ internal/manager/helpers.go | 52 -------- internal/manager/manager.go | 219 ++++++++++++++++++++++-------- internal/merkle/TODO | 1 - internal/merkle/compact.go | 115 ---------------- internal/merkle/merkle.go | 271 ------------------------------------- internal/monitor/chunks.go | 89 +++++++++++++ internal/monitor/matcher.go | 13 ++ internal/monitor/monitor.go | 173 ++++++++++++++++++++++++ internal/monitor/tail.go | 200 ++++++++++++++++++++++++++++ internal/options/options.go | 97 -------------- internal/x509util/x509util.go | 44 ------- main.go | 109 --------------- pkg/crtutil/crt_util.go | 71 ++++++++++ pkg/monitor/chunks.go | 88 ------------- pkg/monitor/errors.go | 41 ------ pkg/monitor/matcher.go | 90 ------------- pkg/monitor/messages.go | 40 ------ pkg/monitor/monitor.go | 286 ---------------------------------------- pkg/policy/node.go | 93 +++++++++++++ pkg/policy/policy.go | 18 +++ pkg/policy/wildcard.go | 83 ++++++++++++ pkg/server/handler.go | 32 ----- pkg/server/messages.go | 23 ---- pkg/server/nodes.go | 53 -------- pkg/server/server.go | 133 ------------------- pkg/storage/errors.go | 5 + pkg/storage/index/index.go | 103 +++++++++++++++ pkg/storage/index/inmem.go | 113 ++++++++++++++++ pkg/storage/loglist/loglist.go | 145 ++++++++++++++++++++ pkg/storage/loglist/metadata.go | 62 +++++++++ pkg/storage/storage.go | 155 ++++++++++++++++++++++ pkg/submission/submission.go | 104 +++++++++++++++ 46 files changed, 2280 insertions(+), 1591 deletions(-) create mode 100644 cmd/silent-ctmoon/main.go create mode 100644 cmd/silent-ctnode/main.go delete mode 100644 docs/http-api.md delete mode 100644 docs/state.md create mode 100644 docs/storage.md create mode 100644 docs/submission.md create mode 100644 internal/feedback/feedback.go create mode 100644 internal/flagopt/flagopt.go create mode 100644 internal/ioutil/ioutil.go create mode 100644 internal/logger/logger.go delete mode 100644 internal/manager/helpers.go delete mode 100644 internal/merkle/TODO delete mode 100644 internal/merkle/compact.go delete mode 100644 internal/merkle/merkle.go create mode 100644 internal/monitor/chunks.go create mode 100644 internal/monitor/matcher.go create mode 100644 internal/monitor/monitor.go create mode 100644 internal/monitor/tail.go delete mode 100644 internal/options/options.go delete mode 100644 internal/x509util/x509util.go delete mode 100644 main.go create mode 100644 pkg/crtutil/crt_util.go delete mode 100644 pkg/monitor/chunks.go delete mode 100644 pkg/monitor/errors.go delete mode 100644 pkg/monitor/matcher.go delete mode 100644 pkg/monitor/messages.go delete mode 100644 pkg/monitor/monitor.go create mode 100644 pkg/policy/node.go create mode 100644 pkg/policy/policy.go create mode 100644 pkg/policy/wildcard.go delete mode 100644 pkg/server/handler.go delete mode 100644 pkg/server/messages.go delete mode 100644 pkg/server/nodes.go delete mode 100644 pkg/server/server.go 
create mode 100644 pkg/storage/errors.go create mode 100644 pkg/storage/index/index.go create mode 100644 pkg/storage/index/inmem.go create mode 100644 pkg/storage/loglist/loglist.go create mode 100644 pkg/storage/loglist/metadata.go create mode 100644 pkg/storage/storage.go create mode 100644 pkg/submission/submission.go diff --git a/README.md b/README.md index 44b2da0..957fb9e 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,8 @@ An implementation of a silent Certificate Transparency (CT) monitor. **Status:** work in progress, please do not use for anything serious. +FIXME: update readme based on latest revision. + ## How it works Each node that issues TLS certificates submit them to a self-hosted monitor. diff --git a/cmd/silent-ctmoon/main.go b/cmd/silent-ctmoon/main.go new file mode 100644 index 0000000..436ea64 --- /dev/null +++ b/cmd/silent-ctmoon/main.go @@ -0,0 +1,217 @@ +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "log" + "os" + "os/signal" + "strings" + "sync" + "syscall" + "time" + + "rgdd.se/silent-ct/internal/feedback" + "rgdd.se/silent-ct/internal/flagopt" + "rgdd.se/silent-ct/internal/ioutil" + "rgdd.se/silent-ct/internal/logger" + "rgdd.se/silent-ct/internal/manager" + "rgdd.se/silent-ct/internal/monitor" + "rgdd.se/silent-ct/pkg/policy" +) + +const usage = ` +A utility that follows relevant Certificate Transparency logs to +discover certificates that may be mis-issued. To be silent, any +legitimately issued certificates are pulled from trusted nodes. + +Usage: + + silent-ctmoon --help + silent-ctmoon [Options] -d DIRECTORY -f POLICY_FILE + +Options: + + -h, --help: Output usage message and exit + -v, --verbosity: Leveled logging output (default: NOTICE) + + -b, --bootstrap: Initialize a new state directory (Default: false) + -c, --contact: A string that helps log operators know who you are (Default: "") + -d, --directory: Path to a directory where all state will be stored + -e, --please-exit: Toggle to only run until up-to-date (Default: false) + -f, --policy-file: Path to the monitor's policy file in JSON format + -o, --output-file: File that all output will be written to (Default: stdout) + -p, --pull-interval: How often nodes are pulled for certificates (Default: 15m) + -w, --num-workers: Number of parallel workers to fetch each log with (Default: 2) +` + +type config struct { + // Options + verbosity string + bootstrap bool + contact string + directory string + pleaseExit bool + policyFile string + outputFile string + pullInterval time.Duration + numWorkers uint + + // Extracted + log logger.Logger + policy policy.Policy +} + +func configure(cmd string, args []string) (cfg config, err error) { + fs := flag.NewFlagSet(cmd, flag.ContinueOnError) + fs.Usage = func() {} + flagopt.StringOpt(fs, &cfg.verbosity, "verbosity", "v", logger.LevelNotice.String()) + flagopt.BoolOpt(fs, &cfg.bootstrap, "bootstrap", "b", false) + flagopt.StringOpt(fs, &cfg.contact, "contact", "c", "") + flagopt.StringOpt(fs, &cfg.directory, "directory", "d", "") + flagopt.BoolOpt(fs, &cfg.pleaseExit, "please-exit", "e", false) + flagopt.StringOpt(fs, &cfg.policyFile, "policy-file", "f", "") + flagopt.StringOpt(fs, &cfg.outputFile, "output-file", "o", "") + flagopt.DurationOpt(fs, &cfg.pullInterval, "pull-interval", "p", 15*time.Minute) + flagopt.UintOpt(fs, &cfg.numWorkers, "num-workers", "w", 2) + if err = fs.Parse(args); err != nil { + return cfg, err + } + + // Options + lv, err := logger.NewLevel(cfg.verbosity) + if err != nil { + return cfg, fmt.Errorf("invalid verbosity: 
%v", err) + } + if cfg.directory == "" { + return cfg, fmt.Errorf("directory is a required option") + } + if cfg.policyFile == "" { + return cfg, fmt.Errorf("policy file is a required option") + } + if cfg.numWorkers == 0 || cfg.numWorkers > 4 { + return cfg, fmt.Errorf("number of workers must be in [1, 4]") + } + cfg.log = logger.New(logger.Config{Level: lv, File: os.Stdout}) + if err := ioutil.ReadJSON(cfg.policyFile, &cfg.policy); err != nil { + return cfg, err + } + if len(cfg.policy.Monitor) == 0 { + return cfg, fmt.Errorf("policy: need at least one wildcard to monitor") + } + + // Arguments + if len(fs.Args()) != 0 { + return cfg, fmt.Errorf("trailing arguments are not permitted") + } + + return cfg, nil +} + +func main() { + cfg, err := configure(os.Args[0], os.Args[1:]) + if err != nil { + if errors.Is(err, flag.ErrHelp) { + fmt.Fprintf(os.Stderr, "%s", usage[1:]) + os.Exit(0) + } + if !strings.Contains(err.Error(), "flag provided but not defined") { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + + feventCh := make(chan []feedback.Event) + defer close(feventCh) + + mconfigCh := make(chan monitor.MonitoredLog) + defer close(mconfigCh) + + meventCh := make(chan monitor.Event) + defer close(meventCh) + + errorCh := make(chan error) + defer close(errorCh) + + mgr, err := manager.New(manager.Config{ + Policy: cfg.policy, + Bootstrap: cfg.bootstrap, + Directory: cfg.directory, + Logger: cfg.log, + AlertDelay: cfg.pullInterval * 3 / 2, + }, feventCh, meventCh, mconfigCh, errorCh) + if err != nil { + cfg.log.Dief("manager: %v\n", err) + } + mon, err := monitor.New(monitor.Config{ + Matcher: &cfg.policy.Monitor, + Logger: cfg.log, + Contact: cfg.contact, + NumWorkers: cfg.numWorkers, + }, meventCh, mconfigCh, errorCh) + if err != nil { + cfg.log.Dief("monitor: %v\n", err) + } + fb, err := feedback.New(feedback.Config{ + Policy: cfg.policy, + Logger: cfg.log, + PullInterval: cfg.pullInterval, + }, feventCh) + if err != nil { + cfg.log.Dief("feedback: %v\n", err) + } + + if cfg.bootstrap { + os.Exit(0) + } + if cfg.pleaseExit { + cfg.log.Dief("the --please-exit option is not supported yet\n") + } + + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(context.Background()) + + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + await(ctx) + }() + + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + mon.RunForever(ctx) + }() + + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + fb.RunForever(ctx) + }() + + os.Exit(func() int { + defer wg.Wait() + defer cancel() + if err := mgr.Run(ctx); err != nil { + log.Fatalf("manager: %v\n", err) + return 1 + } + return 0 + }()) +} + +func await(ctx context.Context) { + sigs := make(chan os.Signal, 1) + defer close(sigs) + + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + select { + case <-sigs: + case <-ctx.Done(): + } +} diff --git a/cmd/silent-ctnode/main.go b/cmd/silent-ctnode/main.go new file mode 100644 index 0000000..fec088c --- /dev/null +++ b/cmd/silent-ctnode/main.go @@ -0,0 +1,127 @@ +package main + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "rgdd.se/silent-ct/internal/flagopt" + "rgdd.se/silent-ct/internal/ioutil" + "rgdd.se/silent-ct/internal/logger" + "rgdd.se/silent-ct/pkg/crtutil" + "rgdd.se/silent-ct/pkg/policy" + "rgdd.se/silent-ct/pkg/submission" +) + +const usage = ` +A utility that generates a submission of one or more certificate chains. +The generated submission is protected by a message authentication code. 
+ +Usage: + + silent-ctnode --help + silent-ctnode [Options] -n NAME -s SECRET FILE [FILE ...] + +Options: + + -h, --help: Output usage message and exit + -v, --verbosity Leveled logging output (default: NOTICE) + + -n, --name: Name of the node generating the submission + -s, --secret: Shared secret between the node and its monitor + -o, --output: File to write submission to (default: stdout) + +Each trailing FILE argument must contain a single certificate chain. +` + +type config struct { + // Options + verbosity string + name string + secret string + output string + + // Extracted + log logger.Logger + files []string +} + +func configure(cmd string, args []string) (cfg config, err error) { + fs := flag.NewFlagSet(cmd, flag.ContinueOnError) + fs.Usage = func() {} + flagopt.StringOpt(fs, &cfg.verbosity, "verbosity", "v", logger.LevelNotice.String()) + flagopt.StringOpt(fs, &cfg.name, "name", "n", "") + flagopt.StringOpt(fs, &cfg.secret, "secret", "s", "") + flagopt.StringOpt(fs, &cfg.output, "output", "o", "") + if err = fs.Parse(args); err != nil { + return cfg, err + } + + // Options + lv, err := logger.NewLevel(cfg.verbosity) + if err != nil { + return cfg, fmt.Errorf("invalid verbosity: %v", err) + } + if cfg.name == "" { + return cfg, fmt.Errorf("node name is required") + } + if cfg.secret == "" { + return cfg, fmt.Errorf("node secret is required") + } + cfg.log = logger.New(logger.Config{Level: lv, File: os.Stderr}) + + // Arguments + cfg.files = fs.Args() + if len(cfg.files) == 0 { + return cfg, fmt.Errorf("at least one certificate chain file is required") + } + + return cfg, err +} + +func main() { + cfg, err := configure(os.Args[0], os.Args[1:]) + if err != nil { + if errors.Is(err, flag.ErrHelp) { + fmt.Fprintf(os.Stderr, "%s", usage[1:]) + os.Exit(0) + } + if !strings.Contains(err.Error(), "flag provided but not defined") { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + + var chains [][]byte + for i, path := range cfg.files { + b, err := ioutil.ReadData(path) + if err != nil { + cfg.log.Dief("file %d: %v\n", i, err) + } + if _, err := crtutil.CertificateChainFromPEM(b); err != nil { + cfg.log.Dief("file %d: %v\n", i, err) + } + + chains = append(chains, b) + } + + node, err := policy.NewNode(cfg.name, cfg.secret, "http://www.example.org/unused", nil) + if err != nil { + cfg.log.Dief("api: %v\n", err) + } + s, err := submission.New(node, chains) + if err != nil { + cfg.log.Dief("api: %v\n", err) + } + + fp := os.Stdout + if cfg.output != "" { + if fp, err = os.OpenFile(cfg.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { + cfg.log.Dief("output: %v\n", err) + } + } + + fmt.Fprintf(fp, "%s", string(s)) +} diff --git a/docs/http-api.md b/docs/http-api.md deleted file mode 100644 index d78f2ee..0000000 --- a/docs/http-api.md +++ /dev/null @@ -1,29 +0,0 @@ -# HTTP API - -The monitor listens for HTTP POST requests on a well-known endpoint. For -example, the well-known endpoint might be `https://example.com/add-chain` or -`http://exampled3jsb2t6n2f5f6r4v4gkqmqd7h4hjucwb7y5.onion/add-chain`. - -The HTTP POST request body should be an X.509v3 chain in PEM format. The first -certificate must be a leaf. The remaining certificates must be CA certificates. - -To authenticate the node adding a certificate chain to the monitor, the HTTP -authorization header needs to be present and carry a valid value. - - Authorization: TYPE NAME:VALUE - -`TYPE`: custom HTTP authorization type used by the monitor, set to "silent-ct". 
- -`NAME`: identifier that the monitor uses to locate the right pre-shared secret. - -`VALUE`: HMAC with SHA256 as the hash function for the entire HTTP POST request -body. The HMAC key is derived by the node and the monitor from the pre-shared -secret `SECRET`, node name `NAME`, and HTTP authorization type `TYPE`. In Go: - - hkdf := hkdf.New(sha256.New, SECRET, TYPE, NAME) - key := make([]byte, 16) - io.ReadFull(hkdf, key) - -On successful processing of a request, the monitor outputs HTTP 200 OK. If the -HMAC value is incorrect or the node is not allowed to request certificates for -the domain names in the request body, the monitor outputs HTTP 401 Unauthorized. diff --git a/docs/introduction.md b/docs/introduction.md index 65b0366..b0d7e71 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -69,10 +69,9 @@ To filter out certificates that are not relevant, the monitor is configured with a list of domains to match on. Only matching certificates will be stored, which means there are nearly no storage requirements to run this type of monitor. -To achieve the property of _silence_, the trusted nodes push legitimately issued -certificates to the monitor which listens for such requests using an HTTP API. -The monitor will use the feedback from these nodes to further filter the -downloaded certificates that matched based on which ones are legitimate. If any +To get the property of _silence_, the monitor pulls the trusted nodes +periodically (HTTP GET) for legitimately issued certificates. The monitor will +use this feedback to filter the downloaded certificates that matched. If any certificates are found that no node pushed to the monitor, an alert is created. The communication channel between the trusted nodes and the monitor can be @@ -83,30 +82,13 @@ Owning that the communication channel is insecure helps avoid misconfiguration. A pre-shared secret is used for each node to authenticate with the monitor. That secret is never shown on the wire (an HMAC key is derived from it), which means that all a man-in-the-middle attacker can do is replay and block messages. -Accounting for replayed messages is either way a good thing, because it makes it -easy to overcome temporary issues like a spotty network, a monitor reboot, etc. -In other words, nodes can periodically (re)submit their legitimate certificates. -Blocked messages can not be solved by cryptography and will result in alerts. - -To keep the attack surface of the monitor down, one may optionally choose to -operate it as a Tor onion service, restrict access using WireGuard, or similar. -Or in the case of a single-node deployment, run everything on the same system. - -## Open questions - - - What do we do about certificates that appear in the logs after time `T`, but - which were issued with a `NotBefore` timestamp that is before time `T`? We - want to minimize noise, but also not open up for CA backdating threats. - - What options for generating alerts should be supported other than stdout, if - any? Most deployments already have ways to integrate with dashboards/email. +"Replays" can happen either way because the monitor polls periodically, i.e., +the monitor needs to account for the fact that it may poll the same thing twice. +Blocking can not be solved by cryptography and would simply result in alerts. ## Further documentation - - [HTTP API](./http-api.md): read more about how the trusted nodes - authenticate with the monitor to submit legitimately issued certificates. 
- - [State directory](./state.md): read more about how the trusted monitor keeps - track of its state as plaintext files in a given directory. Staying away - from a database is meant to ease debugging, silencing of alerts, etc. +docdoc ## Future ideas diff --git a/docs/state.md b/docs/state.md deleted file mode 100644 index fbb965d..0000000 --- a/docs/state.md +++ /dev/null @@ -1,9 +0,0 @@ -# State - -All state is managed by the silent-ct [manager](../internal/manager). All such -state is persisted to disk in a common directory as a collection of files. Not -having any database simplifies setup and manual debugging, should it be needed. - -## Structure of the state directory - -docdoc diff --git a/docs/storage.md b/docs/storage.md new file mode 100644 index 0000000..a0616ed --- /dev/null +++ b/docs/storage.md @@ -0,0 +1,3 @@ +# Storage + +docdoc diff --git a/docs/submission.md b/docs/submission.md new file mode 100644 index 0000000..357f07a --- /dev/null +++ b/docs/submission.md @@ -0,0 +1,22 @@ +# Submission + +docdoc + +## Format + + NAME MAC + + silent-ct:separator + ... + + +`NAME`: identifier that the monitor uses to locate the right secret. + +`MAC`: HMAC with SHA256 as the hash function, computed for line two and forward. +The HMAC key is derived by the node and the monitor from their shared secret: + + hkdf := hkdf.New(sha256.New, SECRET, []byte("silent-ct"), NAME) + key := make([]byte, 16) + io.ReadFull(hkdf, key) + +``: certificate chain in PEM format the node considers legitimate. diff --git a/go.mod b/go.mod index 61e0b57..f0553fa 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/certificate-transparency-go v1.1.7 // indirect github.com/google/trillian v1.5.3 // indirect - gitlab.torproject.org/rgdd/ct v0.0.0-20230508072727-1d1808eac7db // indirect + gitlab.torproject.org/rgdd/ct v0.0.0-20240101071140-1b7c55dcd5ba // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/net v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect diff --git a/go.sum b/go.sum index b9acc1d..532c29b 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,8 @@ github.com/google/trillian v1.5.3 h1:3ioA5p09qz+U9/t2riklZtaQdZclaStp0/eQNfewNRg github.com/google/trillian v1.5.3/go.mod h1:p4tcg7eBr7aT6DxrAoILpc3uXNfcuAvZSnQKonVg+Eo= gitlab.torproject.org/rgdd/ct v0.0.0-20230508072727-1d1808eac7db h1:wR2Fc+fRDBkpabB1og472+HCoNtE4TJEuAHbSG4EIEs= gitlab.torproject.org/rgdd/ct v0.0.0-20230508072727-1d1808eac7db/go.mod h1:dkEqBVulcsefxw3k5CX53bw88KUkTYcTRSJhMlj1Veg= +gitlab.torproject.org/rgdd/ct v0.0.0-20240101071140-1b7c55dcd5ba h1:ZoJTzvVdgbdejL6Ph2yJp/fkA/A6qJ+m0I8Xv6MDxFs= +gitlab.torproject.org/rgdd/ct v0.0.0-20240101071140-1b7c55dcd5ba/go.mod h1:dkEqBVulcsefxw3k5CX53bw88KUkTYcTRSJhMlj1Veg= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= diff --git a/internal/feedback/feedback.go b/internal/feedback/feedback.go new file mode 100644 index 0000000..77431e0 --- /dev/null +++ b/internal/feedback/feedback.go @@ -0,0 +1,127 @@ +package feedback + +import ( + "context" + "fmt" + "io" + "net/http" + "os" + "time" + + "rgdd.se/silent-ct/internal/logger" + "rgdd.se/silent-ct/pkg/crtutil" + "rgdd.se/silent-ct/pkg/policy" + "rgdd.se/silent-ct/pkg/submission" +) + +type Event struct { + NodeName string // Name of 
the node that generated a submission + PEMChain []byte // A certificate chain found in the submission +} + +type Config struct { + Policy policy.Policy + + // Optional + Logger logger.Logger // Debug and info prints only (no output by default) + PullInterval time.Duration // How often nodes are pulled via HTTP GET + HTTPTimeout time.Duration // Timeout to use when pulling nodes +} + +type Feedback struct { + cfg Config + nodes []policy.Node + eventCh chan []Event +} + +func New(cfg Config, eventCh chan []Event) (Feedback, error) { + if !cfg.Logger.IsConfigured() { + cfg.Logger = logger.New(logger.Config{Level: logger.LevelNotice, File: os.Stdout}) + } + if cfg.PullInterval == 0 { + cfg.PullInterval = 1 * time.Hour + } + if cfg.HTTPTimeout == 0 { + cfg.HTTPTimeout = 10 * time.Second + } + + for i, node := range cfg.Policy.Nodes { + if err := node.Validate(); err != nil { + return Feedback{}, fmt.Errorf("node %d: %v", i, err) + } + } + return Feedback{cfg: cfg, nodes: cfg.Policy.Nodes, eventCh: eventCh}, nil +} + +// RunForever collects legitimately issued certificates from nodes +func (fb *Feedback) RunForever(ctx context.Context) { + ticker := time.NewTicker(fb.cfg.PullInterval) + defer ticker.Stop() + + for { + fb.pullOnce(ctx) + select { + case <-ticker.C: + case <-ctx.Done(): + return + } + } +} + +func (fb *Feedback) pullOnce(ctx context.Context) { + fb.cfg.Logger.Debugf("pull %d nodes\n", len(fb.nodes)) + for _, node := range fb.nodes { + data, err := fb.pull(ctx, node) + if err != nil { + fb.cfg.Logger.Debugf("failed to pull node %s: %v", node.Name, err) + continue + } + + var events []Event + for _, pemChain := range data { + chain, err := crtutil.CertificateChainFromPEM(pemChain) + if err != nil { + fb.cfg.Logger.Infof("failed to parse certificate from node %s: %v", node.Name, err) + continue + } + if err := node.Authorize(chain[0].DNSNames); err != nil { + fb.cfg.Logger.Infof("%s\n", err.Error()) + continue + } + + events = append(events, Event{NodeName: node.Name, PEMChain: pemChain}) + } + + fb.eventCh <- events + } +} + +func (fb *Feedback) pull(ctx context.Context, node policy.Node) ([][]byte, error) { + req, err := http.NewRequest(http.MethodGet, node.URL, nil) + if err != nil { + return nil, fmt.Errorf("new request: %v", err) + } + req = req.WithContext(ctx) + + cli := http.Client{Timeout: fb.cfg.HTTPTimeout} + rsp, err := cli.Do(req) + if err != nil { + return nil, fmt.Errorf("HTTP %s: %v", req.Method, err) + } + defer rsp.Body.Close() + + b, err := io.ReadAll(rsp.Body) + if err != nil { + return nil, fmt.Errorf("read response: %v", err) + } + if rsp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", rsp.Status, string(b)) + } + + s := submission.Submission(b) + data, err := s.Open(node) + if err != nil { + return nil, fmt.Errorf("open: %v", err) + } + + return data, nil +} diff --git a/internal/flagopt/flagopt.go b/internal/flagopt/flagopt.go new file mode 100644 index 0000000..484270d --- /dev/null +++ b/internal/flagopt/flagopt.go @@ -0,0 +1,26 @@ +package flagopt + +import ( + "flag" + "time" +) + +func BoolOpt(fs *flag.FlagSet, opt *bool, short, long string, value bool) { + fs.BoolVar(opt, short, value, "") + fs.BoolVar(opt, long, value, "") +} + +func DurationOpt(fs *flag.FlagSet, opt *time.Duration, short, long string, value time.Duration) { + fs.DurationVar(opt, short, value, "") + fs.DurationVar(opt, long, value, "") +} + +func UintOpt(fs *flag.FlagSet, opt *uint, short, long string, value uint) { + fs.UintVar(opt, short, value, "") + fs.UintVar(opt, 
long, value, "") +} + +func StringOpt(fs *flag.FlagSet, opt *string, short, long, value string) { + fs.StringVar(opt, short, value, "") + fs.StringVar(opt, long, value, "") +} diff --git a/internal/ioutil/ioutil.go b/internal/ioutil/ioutil.go new file mode 100644 index 0000000..7fe6cfc --- /dev/null +++ b/internal/ioutil/ioutil.go @@ -0,0 +1,56 @@ +package ioutil + +import ( + "encoding/json" + "fmt" + "os" +) + +func CommitData(path string, data []byte) error { + return os.WriteFile(path, data, 0644) // FIXME: use safefile package for atomic file writes +} + +func ReadData(path string) ([]byte, error) { + return os.ReadFile(path) +} + +func CommitJSON(path string, obj any) error { + b, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + return CommitData(path, b) +} + +func ReadJSON(path string, obj any) error { + b, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("%s: %v", path, err) + } + return json.Unmarshal(b, obj) +} + +func CreateDirectories(paths []string) error { + for _, path := range paths { + if err := os.Mkdir(path, 0755); err != nil { + return err + } + } + return nil +} + +func DirectoriesExist(paths []string) error { + for _, path := range paths { + info, err := os.Stat(path) + if os.IsNotExist(err) { + return fmt.Errorf("directory does not exist: %s", path) + } + if err != nil { + return err + } + if !info.IsDir() { + return fmt.Errorf("%s: is not a directory", path) + } + } + return nil +} diff --git a/internal/logger/logger.go b/internal/logger/logger.go new file mode 100644 index 0000000..195ad3e --- /dev/null +++ b/internal/logger/logger.go @@ -0,0 +1,96 @@ +package logger + +import ( + "fmt" + "log" + "os" + "sync" +) + +const ( + LevelDebug Level = iota + 1 + LevelInfo + LevelNotice + LevelFatal +) + +type Level int + +func NewLevel(str string) (Level, error) { + switch str { + case LevelDebug.String(): + return LevelDebug, nil + case LevelInfo.String(): + return LevelInfo, nil + case LevelNotice.String(): + return LevelNotice, nil + case LevelFatal.String(): + return LevelFatal, nil + } + return Level(0), fmt.Errorf("unknown level %q", str) +} + +func (lv Level) String() string { + switch lv { + case LevelDebug: + return "DEBUG" + case LevelInfo: + return "INFO" + case LevelNotice: + return "NOTICE" + case LevelFatal: + return "FATAL" + default: + return "UNKNOWN" + } +} + +type Config struct { + Level Level + File *os.File +} + +type Logger struct { + cfg Config + log log.Logger + mutex sync.Mutex +} + +func New(cfg Config) (l Logger) { + l.Reconfigure(cfg) + return +} + +func (l *Logger) Reconfigure(cfg Config) { + l.mutex.Lock() + defer l.mutex.Unlock() + + if cfg.Level < LevelDebug || cfg.Level > LevelFatal { + cfg.Level = LevelNotice + } + if cfg.File == nil { + cfg.File = os.Stdout + } + + l.cfg = cfg + l.log = *log.New(l.cfg.File, "", log.Ldate|log.Ltime) +} + +func (l *Logger) IsConfigured() bool { + return l.cfg.File != nil +} + +func (l *Logger) Debugf(format string, args ...interface{}) { l.printf(LevelDebug, format, args...) } +func (l *Logger) Infof(format string, args ...interface{}) { l.printf(LevelInfo, format, args...) } +func (l *Logger) Noticef(format string, args ...interface{}) { l.printf(LevelNotice, format, args...) } +func (l *Logger) Fatalf(format string, args ...interface{}) { l.printf(LevelFatal, format, args...) 
} +func (l *Logger) Dief(format string, args ...interface{}) { l.Fatalf(format, args...); os.Exit(1) } + +func (l *Logger) printf(lv Level, format string, args ...interface{}) { + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.cfg.Level <= lv { + l.log.Printf("["+lv.String()+"] "+format, args...) + } +} diff --git a/internal/manager/helpers.go b/internal/manager/helpers.go deleted file mode 100644 index a9a2158..0000000 --- a/internal/manager/helpers.go +++ /dev/null @@ -1,52 +0,0 @@ -package manager - -import ( - "crypto/sha256" - "encoding/base64" - "fmt" - - ct "github.com/google/certificate-transparency-go" - "gitlab.torproject.org/rgdd/ct/pkg/metadata" - "rgdd.se/silent-ct/pkg/monitor" -) - -func selectLogs(m metadata.Metadata) []monitor.MessageLogConfig { - var logs []monitor.MessageLogConfig - for _, operator := range m.Operators { - for _, log := range operator.Logs { - if log.State == nil { - continue // ignore logs without a state (should not happen) - } - if log.State.Name == metadata.LogStatePending { - continue // log is not yet relevant - } - if log.State.Name == metadata.LogStateRetired { - continue // log is not expected to be reachable - } - if log.State.Name == metadata.LogStateRejected { - continue // log is not expected to be reachable - } - - // FIXME: remove me instead of hard coding Argon 2024 - id, _ := log.Key.ID() - got := fmt.Sprintf("%s", base64.StdEncoding.EncodeToString(id[:])) - want := "7s3QZNXbGs7FXLedtM0TojKHRny87N7DUUhZRnEftZs=" - if got != want { - continue - } - - logs = append(logs, monitor.MessageLogConfig{ - Metadata: log, - State: monitor.MonitorState{ - LogState: monitor.LogState{ct.SignedTreeHead{ - SHA256RootHash: [sha256.Size]byte{47, 66, 110, 15, 246, 154, 8, 100, 150, 140, 206, 208, 17, 57, 112, 116, 210, 3, 19, 55, 46, 63, 209, 12, 234, 130, 225, 124, 237, 2, 64, 228}, - TreeSize: 610650601, - Timestamp: 1702108968538, - }}, - NextIndex: 388452203, - }, - }) - } - } - return logs -} diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 33207e9..bc216c1 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -2,93 +2,208 @@ package manager import ( "context" - "encoding/json" + "errors" "fmt" "os" "time" "gitlab.torproject.org/rgdd/ct/pkg/metadata" - "rgdd.se/silent-ct/pkg/monitor" - "rgdd.se/silent-ct/pkg/server" -) - -const ( - DefaultStateDir = "/home/rgdd/.local/share/silent-ct" // FIXME - DefaultMetadataRefreshInterval = 1 * time.Hour + "rgdd.se/silent-ct/internal/feedback" + "rgdd.se/silent-ct/internal/logger" + "rgdd.se/silent-ct/internal/monitor" + "rgdd.se/silent-ct/pkg/policy" + "rgdd.se/silent-ct/pkg/storage" ) type Config struct { - StateDir string - Nodes server.Nodes + Policy policy.Policy + Bootstrap bool // Whether a new storage should be initialized from scratch + Directory string // Path to a directory where everything will be stored - MetadataRefreshInterval time.Duration + // Optional + Logger logger.Logger // Where to output messages and with what verbosity + AlertDelay time.Duration // Time before alerting on certificates that are unaccounted for + MetadataRefreshInterval time.Duration // How often to update the list of monitored logs + ShutdownTimeout time.Duration // Force shutdown after this timeout (FIXME: should not be needed) } type Manager struct { Config + storage.Storage + + feventCh chan []feedback.Event + meventCh chan monitor.Event + mconfigCh chan monitor.MonitoredLog + errorCh chan error } -func New(cfg Config) (Manager, error) { - if cfg.StateDir == "" { - 
cfg.StateDir = DefaultStateDir +func New(cfg Config, fch chan []feedback.Event, mch chan monitor.Event, cch chan monitor.MonitoredLog, ech chan error) (Manager, error) { + if !cfg.Logger.IsConfigured() { + cfg.Logger = logger.New(logger.Config{Level: logger.LevelNotice, File: os.Stdout}) } if cfg.MetadataRefreshInterval == 0 { - cfg.MetadataRefreshInterval = DefaultMetadataRefreshInterval + cfg.MetadataRefreshInterval = 1 * time.Hour + } + if cfg.ShutdownTimeout == 0 { + cfg.ShutdownTimeout = 1 * time.Second // FIXME: increase } - return Manager{Config: cfg}, nil -} - -func (mgr *Manager) Run(ctx context.Context, - serverCh chan server.MessageNodeSubmission, - monitorCh chan monitor.MessageLogProgress, - configCh chan []monitor.MessageLogConfig, - errorCh chan error) error { - md, err := mgr.metadataRead() + s, err := storage.New(storage.Config{ + Bootstrap: cfg.Bootstrap, + Directory: cfg.Directory, + AlertDelay: cfg.AlertDelay, + StaticLogs: cfg.Policy.StaticLogs, + RemoveLogs: cfg.Policy.RemoveLogs, + }) if err != nil { - return fmt.Errorf("read metadata: %v\n", err) + return Manager{}, err + } + + for _, log := range s.LogList.Generate() { + state, err := s.BootstrapLog(context.Background(), log, cfg.Bootstrap) + if errors.Is(err, storage.ErrorMonitorStateExists) { + continue + } + if err != nil { + return Manager{}, err + } + cfg.Logger.Infof("bootstrapping log %s at next index %d\n", log.URL, state.NextIndex) + } + + return Manager{Config: cfg, Storage: s, feventCh: fch, meventCh: mch, mconfigCh: cch, errorCh: ech}, nil +} + +func (mgr *Manager) Run(ctx context.Context) error { + if err := mgr.startupConfig(); err != nil { + return fmt.Errorf("unable to do startup config: %v", err) } - configCh <- selectLogs(md) - ticker := time.NewTicker(mgr.MetadataRefreshInterval) - defer ticker.Stop() + metadataTicker := time.NewTicker(mgr.MetadataRefreshInterval) + defer metadataTicker.Stop() + shutdown := false for { select { - case <-ctx.Done(): - return nil - case <-ticker.C: - mu, err := mgr.metadataUpdate(ctx, md) - if err != nil { - continue + case <-metadataTicker.C: + if err := mgr.metadataJob(ctx); err != nil { + return fmt.Errorf("unable to run metadata job: %v", err) + } + case ev := <-mgr.meventCh: + if err := mgr.monitorJob(ev); err != nil { + return fmt.Errorf("unable to run monitor job: %v", err) + } + if err := mgr.alertJob(); err != nil { + return fmt.Errorf("unable to run alert job: %v\n", err) } - if mu.Version.Major <= md.Version.Major { + case ev := <-mgr.feventCh: + if err := mgr.feedbackJob(ev); err != nil { + return fmt.Errorf("unable to run server job: %v", err) + } + if err := mgr.alertJob(); err != nil { + return fmt.Errorf("unable to run alert job: %v\n", err) + } + case err := <-mgr.errorCh: + if err := mgr.errorJob(err); err != nil { + return fmt.Errorf("unable to run error job: %v", err) + } + case <-ctx.Done(): + if !shutdown { + shutdown = true + mgr.Logger.Noticef("shutdown scheduled in %v\n", mgr.ShutdownTimeout) + + // defer shutdown so that all channels can be drained + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(context.Background(), mgr.ShutdownTimeout) + defer cancel() continue } - md = mu - configCh <- selectLogs(md) - case ev := <-monitorCh: - fmt.Printf("DEBUG: received event from monitor with %d matches\n", len(ev.Matches)) - case ev := <-serverCh: - fmt.Printf("DEBUG: received event from server: %v\n", ev) - case err := <-errorCh: - fmt.Printf("DEBUG: received error: %v\n", err) + + mgr.Logger.Debugf("manager shutdown\n") + 
os.Exit(0) // FIXME: return nil without hanging, unpredictable gh.com/google/ct-go fetcher shutdown? } } } -func (mgr *Manager) metadataRead() (metadata.Metadata, error) { - b, err := os.ReadFile(mgr.StateDir + "/metadata.json") +func (mgr *Manager) startupConfig() error { + mgr.Logger.Debugf("startup configuring contains %d logs\n", len(mgr.Storage.LogList.Generate())) + for _, log := range mgr.Storage.LogList.Generate() { + state, err := mgr.GetMonitorState(log) + if err != nil { + return err + } + mgr.mconfigCh <- monitor.MonitoredLog{Config: log, State: state} + } + return nil +} + +func (mgr *Manager) metadataJob(ctx context.Context) error { + mgr.Logger.Debugf("running metadata job\n") + added, removed, err := mgr.LogList.Update(ctx) + if err != nil { + if mgr.LogList.IsStale() { + return fmt.Errorf("unable to update log list which is now stale: %v", err) + } + } + + mgr.removeLogs(removed) + mgr.addLogs(ctx, added) + return nil +} + +func (mgr *Manager) removeLogs(logs []metadata.Log) { + mgr.Logger.Debugf("removing %d logs\n", len(logs)) + for _, log := range logs { + state, _ := mgr.GetMonitorState(log) + mgr.Logger.Infof("removing log %s with %d entries in its backlog\n", log.URL, state.TreeSize-state.NextIndex) + mgr.mconfigCh <- monitor.MonitoredLog{Config: log} + } +} + +func (mgr *Manager) addLogs(ctx context.Context, logs []metadata.Log) { + mgr.Logger.Debugf("adding %d logs\n", len(logs)) + for _, log := range logs { + state, err := mgr.BootstrapLog(ctx, log, false) + if errors.Is(err, storage.ErrorMonitorStateExists) { + mgr.Logger.Infof("adding log %s with existing state on disk\n", log.URL) + } else if err != nil { + mgr.Logger.Noticef("restart required: failed to bootstrap new log %s: %v\n", log.URL, err) + } else { + mgr.Logger.Infof("bootstrapping log %s at next index 0\n", log.URL) + } + mgr.mconfigCh <- monitor.MonitoredLog{Config: log, State: state} + } +} + +func (mgr *Manager) feedbackJob(events []feedback.Event) error { + mgr.Logger.Debugf("received feedback with %d events", len(events)) + for _, ev := range events { + if err := mgr.AddChain(ev.NodeName, ev.PEMChain); err != nil { + return err + } + } + return nil +} + +func (mgr *Manager) monitorJob(msg monitor.Event) error { + mgr.Logger.Debugf("new state for %s\n", msg.Summary()) + if err := mgr.AddEntries(msg.State.LogID, msg.Matches); err != nil { + return err + } + return mgr.SetMonitorState(msg.State.LogID, msg.State) +} + +func (mgr *Manager) alertJob() error { + alerts, err := mgr.Index.TriggerAlerts() if err != nil { - return metadata.Metadata{}, err + return err } - var md metadata.Metadata - if err := json.Unmarshal(b, &md); err != nil { - return metadata.Metadata{}, err + for _, alert := range alerts { + mgr.Logger.Noticef("certificate mis-issuance? No node submitted certificate %s\n", alert.StoredAt) } - return md, nil + return nil } -func (mgr *Manager) metadataUpdate(ctx context.Context, old metadata.Metadata) (metadata.Metadata, error) { - return metadata.Metadata{}, fmt.Errorf("TODO: update metadata") +func (mgr *Manager) errorJob(err error) error { + mgr.Logger.Debugf("received error: %v\n", err) + return nil } diff --git a/internal/merkle/TODO b/internal/merkle/TODO deleted file mode 100644 index 46cc0cb..0000000 --- a/internal/merkle/TODO +++ /dev/null @@ -1 +0,0 @@ -Drop this package, fix the minor edit in upstream. 
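The internal/merkle copy removed below is dropped in favor of the upstream gitlab.torproject.org/rgdd/ct/pkg/merkle package, which the new internal/monitor/tail.go imports for leaf hashing. For reference, a minimal self-contained sketch of the RFC 6962, §2.1 hashing that both implement, using only crypto/sha256 (the helper names here are illustrative and not part of this patch):

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    )

    // hashLeafNode computes MTH({d(0)}) = SHA-256(0x00 || d(0)).
    func hashLeafNode(data []byte) [sha256.Size]byte {
    	return sha256.Sum256(append([]byte{0x00}, data...))
    }

    // hashInteriorNode computes SHA-256(0x01 || left || right).
    func hashInteriorNode(left, right [sha256.Size]byte) [sha256.Size]byte {
    	buf := append([]byte{0x01}, left[:]...)
    	return sha256.Sum256(append(buf, right[:]...))
    }

    func main() {
    	a := hashLeafNode([]byte("leaf a"))
    	b := hashLeafNode([]byte("leaf b"))
    	fmt.Printf("root of a two-leaf tree: %x\n", hashInteriorNode(a, b))
    }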
diff --git a/internal/merkle/compact.go b/internal/merkle/compact.go deleted file mode 100644 index 6eeabd0..0000000 --- a/internal/merkle/compact.go +++ /dev/null @@ -1,115 +0,0 @@ -// BSD 2-Clause License -// -// Copyright (c) 2022, the ct authors -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// From: -// https://gitlab.torproject.org/rgdd/ct/-/tree/main/pkg/merkle -// -// The only difference is that leaf hashes rather than leaf data are passed as -// input to TreeHeadFromRangeProof, thus also changing the nodes() helper. -package merkle - -import ( - "crypto/sha256" - "fmt" -) - -// node represents a subtree at some level and a particular index -type node struct { - index uint64 - hash [sha256.Size]byte -} - -// nodes returns a list of consecutive leaf hashes -func nodes(index uint64, leafHashes [][sha256.Size]byte) (n []node) { - for i, lh := range leafHashes { - n = append(n, node{index + uint64(i), lh}) - } - return -} - -// compactRange outputs the minimal number of fixed subtree hashes given a -// non-empty list of consecutive leaves that start from a non-zero index. For a -// definition of this algorithm, see the end of ../../doc/tlog_algorithms.md. -func compactRange(nodes []node) [][sha256.Size]byte { - // Step 1 - var hashes [][sha256.Size]byte - - // Step 2 - for len(nodes) > 1 { - // Step 2a - if xor(nodes[1].index, 1) != nodes[0].index { - hashes = append(hashes, nodes[0].hash) - nodes = nodes[1:] - } - - // Step 2b; Step 2c; Step 2c(iii) - for i := 0; i < len(nodes); i++ { - // Step 2c(i) - if i+1 != len(nodes) { - nodes[i].hash = HashInteriorNode(nodes[i].hash, nodes[i+1].hash) - nodes = append(nodes[:i+1], nodes[i+2:]...) - } - - // Step 2c(ii) - nodes[i].index = rshift(nodes[i].index) - } - } - - // Step 3 - return append(hashes, nodes[0].hash) -} - -// TreeHeadFromRangeProof computes a tree head at size n=len(leafHashes)+index -// if given a list of leaf hashes at indices index,...,n-1 as well as an -// inclusion proof for the first leaf in the tree of size n. This allows a -// verifier to check inclusion of one or more log entries with a single -// inclusion proof. 
-func TreeHeadFromRangeProof(leafHashes [][sha256.Size]byte, index uint64, proof [][sha256.Size]byte) (root [sha256.Size]byte, err error) { - var cr [][sha256.Size]byte - confirmHash := func(h [sha256.Size]byte) error { - if h != cr[0] { - return fmt.Errorf("aborted due incorrect right-node subtree hash") - } - cr = cr[1:] - return nil - } - copyRoot := func(r [sha256.Size]byte) error { - root = r - return nil - } - - if len(leafHashes) == 0 { - return [sha256.Size]byte{}, fmt.Errorf("need at least one leaf to recompute tree head from proof") - } - if len(leafHashes) > 1 { - cr = compactRange(nodes(index+1, leafHashes[1:])) - } - return root, inclusion(leafHashes[0], index, index+uint64(len(leafHashes)), proof, copyRoot, confirmHash) -} - -func xor(a, b uint64) uint64 { - return a ^ b -} diff --git a/internal/merkle/merkle.go b/internal/merkle/merkle.go deleted file mode 100644 index 872364f..0000000 --- a/internal/merkle/merkle.go +++ /dev/null @@ -1,271 +0,0 @@ -// BSD 2-Clause License -// -// Copyright (c) 2022, the ct authors -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// From: -// https://gitlab.torproject.org/rgdd/ct/-/tree/main/pkg/merkle -package merkle - -import ( - "crypto/sha256" - "fmt" -) - -// HashEmptyTree computes the hash of an empty tree. See RFC 6162, §2.1: -// -// MTH({}) = SHA-256() -func HashEmptyTree() [sha256.Size]byte { - return sha256.Sum256(nil) -} - -// HashLeafNode computes the hash of a leaf's data. See RFC 6162, §2.1: -// -// MTH({d(0)}) = SHA-256(0x00 || d(0)) -func HashLeafNode(data []byte) (hash [sha256.Size]byte) { - h := sha256.New() - h.Write([]byte{0x00}) - h.Write(data) - copy(hash[:], h.Sum(nil)) - return -} - -// HashInteriorNode computes the hash of an interior node. See RFC 6962, §2.1: -// -// MTH(D[n]) = SHA-256(0x01 || MTH(D[0:k]) || MTH(D[k:n]) -func HashInteriorNode(left, right [sha256.Size]byte) (hash [sha256.Size]byte) { - h := sha256.New() - h.Write([]byte{0x01}) - h.Write(left[:]) - h.Write(right[:]) - copy(hash[:], h.Sum(nil)) - return -} - -// inclusion implements the algorithm specified in RFC 9162, Section 2.1.3.2. -// In addition, the caller is allowed to confirm right-node subtree hashes. 
-func inclusion(leaf [sha256.Size]byte, index, size uint64, proof [][sha256.Size]byte, - confirmRoot func([sha256.Size]byte) error, confirmHash func([sha256.Size]byte) error) error { - // Step 1 - if index >= size { - return fmt.Errorf("leaf index must be in [%d, %d]", 0, size-1) - } - - // Step 2 - fn := index - sn := size - 1 - - // Step 3 - r := leaf - - // Step 4 - for i, p := range proof { - // Step 4a - if sn == 0 { - return fmt.Errorf("reached tree head with %d remaining proof hash(es)", len(proof[i:])) - } - - // Step 4b - if isLSB(fn) || fn == sn { - // Step 4b, i - r = HashInteriorNode(p, r) - - // Step 4b, ii - if !isLSB(fn) { - for { - fn = rshift(fn) - sn = rshift(sn) - - if isLSB(fn) || fn == 0 { - break - } - } - } - } else { - // Step 4b, i - r = HashInteriorNode(r, p) - - // Extension: allow the caller to confirm right-node subtree hashes - if err := confirmHash(p); err != nil { - return fmt.Errorf("subtree index %d: %v", fn, err) - } - } - - // Step 4c - fn = rshift(fn) - sn = rshift(sn) - } - - // Step 5 - if sn != 0 { - return fmt.Errorf("stopped at subtree with index %d due to missing proof hashes", fn) - } - return confirmRoot(r) -} - -// consistency implements the algorithm specified in RFC 9162, §2.1.4.2 -func consistency(oldSize, newSize uint64, oldRoot, newRoot [sha256.Size]byte, proof [][sha256.Size]byte) error { - // Step 1 - if len(proof) == 0 { - return fmt.Errorf("need at least one proof hash") - } - - // Step 2 - if isPOW2(oldSize) { - proof = append([][sha256.Size]byte{oldRoot}, proof...) - } - - // Step 3 - fn := oldSize - 1 - sn := newSize - 1 - - // Step 4 - for isLSB(fn) { - fn = rshift(fn) - sn = rshift(sn) - } - - // Step 5 - fr := proof[0] - sr := proof[0] - - // Step 6 - for i, c := range proof[1:] { - // Step 6a - if sn == 0 { - return fmt.Errorf("reached tree head with %d remaining proof hash(es)", len(proof[i+1:])) - } - - // Step 6b - if isLSB(fn) || fn == sn { - // Step 6b, i - fr = HashInteriorNode(c, fr) - // Step 6b, ii - sr = HashInteriorNode(c, sr) - // Step 6b, iii - if !isLSB(fn) { - for { - fn = rshift(fn) - sn = rshift(sn) - - if isLSB(fn) || fn == 0 { - break - } - } - } - } else { - // Step 6b, i - sr = HashInteriorNode(sr, c) - } - - // Step 6c - fn = rshift(fn) - sn = rshift(sn) - } - - // Step 7 - if sn != 0 { - return fmt.Errorf("stopped at subtree with index %d due to missing proof hashes", fn) - } - if fr != oldRoot { - return fmt.Errorf("recomputed old tree head %x is not equal to reference tree head %x", fr[:], oldRoot[:]) - } - if sr != newRoot { - return fmt.Errorf("recomputed new tree head %x is not equal to reference tree head %x", sr[:], newRoot[:]) - } - return nil -} - -// VerifyInclusion verifies that a leaf's data is commited at a given index in a -// reference tree -func VerifyInclusion(data []byte, index, size uint64, root [sha256.Size]byte, proof [][sha256.Size]byte) error { - if size == 0 { - return fmt.Errorf("tree size must be larger than zero") - } - - confirmHash := func(h [sha256.Size]byte) error { return nil } // No compact range extension - confirmRoot := func(r [sha256.Size]byte) error { - if r != root { - return fmt.Errorf("recomputed tree head %x is not equal to reference tree head %x", r[:], root[:]) - } - return nil - } - return inclusion(HashLeafNode(data), index, size, proof, confirmRoot, confirmHash) -} - -// VerifyConsistency verifies that an an old tree is consistent with a new tree -func VerifyConsistency(oldSize, newSize uint64, oldRoot, newRoot [sha256.Size]byte, proof [][sha256.Size]byte) 
error { - checkTree := func(size uint64, root [sha256.Size]byte) error { - if size == 0 { - if root != HashEmptyTree() { - return fmt.Errorf("non-empty tree head %x for size zero", root[:]) - } - if len(proof) != 0 { - return fmt.Errorf("non-empty proof with %d hashes for size zero", len(proof)) - } - } else if root == HashEmptyTree() { - return fmt.Errorf("empty tree head %x for tree size %d", root[:], size) - } - return nil - } - - if err := checkTree(oldSize, oldRoot); err != nil { - return fmt.Errorf("old: %v", err) - } - if err := checkTree(newSize, newRoot); err != nil { - return fmt.Errorf("new: %v", err) - } - if oldSize == 0 { - return nil - } - - if oldSize == newSize { - if oldRoot != newRoot { - return fmt.Errorf("different tree heads %x and %x with equal tree size %d", oldRoot, newRoot, oldSize) - } - if len(proof) != 0 { - return fmt.Errorf("non-empty proof with %d hashes for equal tree size %d", len(proof), oldSize) - } - return nil - } - if oldSize > newSize { - return fmt.Errorf("old tree size %d must be smaller than or equal to the new tree size %d", oldSize, newSize) - } - - return consistency(oldSize, newSize, oldRoot, newRoot, proof) -} - -// isLSB returns true if the least significant bit of num is set -func isLSB(num uint64) bool { - return (num & 1) != 0 -} - -// isPOW2 returns true if num is a power of two (1, 2, 4, 8, ...) -func isPOW2(num uint64) bool { - return (num & (num - 1)) == 0 -} - -func rshift(num uint64) uint64 { - return num >> 1 -} diff --git a/internal/monitor/chunks.go new file mode 100644 index 0000000..02b3802 --- /dev/null +++ b/internal/monitor/chunks.go @@ -0,0 +1,89 @@ +package monitor + +// +// A min heap of chunks, ordered on each chunk's start index. This makes it +// easy to order the downloaded leaves when using multiple parallel fetchers. +// +// Credit: inspiration to use a heap from Aaron Gable, see +// https://github.com/aarongable/ctaudit +// + +import ( + "container/heap" + "crypto/sha256" +) + +type chunk struct { + startIndex uint64 // Index of the first leaf + leafHashes [][sha256.Size]byte // List of consecutive leaf hashes + matches []LogEntry // Leaves that match some criteria + errors []error // Errors that occurred while matching on the leaves +} + +type chunks []*chunk + +func newChunks() *chunks { + var h chunks + heap.Init((*internal)(&h)) + return &h +} + +func (h *chunks) push(c *chunk) { + heap.Push((*internal)(h), c) +} + +func (h *chunks) pop() *chunk { + x := heap.Pop((*internal)(h)) + return x.(*chunk) +} + +// gap returns true if there's a gap between the provided start index and the +// top most chunk. If the top most chunk is in sequence, it is merged with +// any following chunks that are also in sequence to form one larger chunk. +func (h *chunks) gap(start uint64) bool { + if len(*h) == 0 { + return true + } + + top := h.pop() + if start != top.startIndex { + h.push(top) + return true + } + + for len(*h) > 0 { + c := h.pop() + if c.startIndex != top.startIndex+uint64(len(top.leafHashes)) { + h.push(c) + break + } + + top.leafHashes = append(top.leafHashes, c.leafHashes...) + top.matches = append(top.matches, c.matches...) + top.errors = append(top.errors, c.errors...) 
+ } + + h.push(top) + return false +} + +// internal implements the heap interface, see example: +// https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/container/heap/example_intheap_test.go +type internal chunks + +func (h internal) Len() int { return len(h) } +func (h internal) Less(i, j int) bool { return h[i].startIndex < h[j].startIndex } +func (h internal) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h *internal) Push(x any) { + *h = append(*h, x.(*chunk)) +} + +func (h *internal) Pop() any { + old := *h + n := len(old) + x := old[n-1] + old[n-1] = nil // avoid memory leak + *h = old[:n-1] + return x +} diff --git a/internal/monitor/matcher.go b/internal/monitor/matcher.go new file mode 100644 index 0000000..912e595 --- /dev/null +++ b/internal/monitor/matcher.go @@ -0,0 +1,13 @@ +package monitor + +type Matcher interface { + // Match determines if a log entry is considered to be a "match" based on + // some criteria. An error is returned if any certificate parsing fails. + Match(leafInput, extraData []byte) (bool, error) +} + +type MatchAll struct{} + +func (m *MatchAll) Match(leafInput, extraData []byte) (bool, error) { + return true, nil +} diff --git a/internal/monitor/monitor.go b/internal/monitor/monitor.go new file mode 100644 index 0000000..6accd97 --- /dev/null +++ b/internal/monitor/monitor.go @@ -0,0 +1,173 @@ +// Package monitor provides monitoring of Certificate Transparency logs. If +// running in continuous mode, the list of logs can be updated dynamically. +// +// Implement the Matcher interface to customize which certificates should be +// included in the monitor's emitted events. See MatchAll for an example. +// +// Note that this package verifies that the monitored logs are locally +// consistent with regard to the initial start-up state. It is up to the user +// to process the monitor's emitted events and errors, and to persist state. +package monitor + +import ( + "context" + "crypto/x509" + "encoding/base64" + "fmt" + "net/http" + "os" + "sync" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" + "gitlab.torproject.org/rgdd/ct/pkg/metadata" + "rgdd.se/silent-ct/internal/logger" +) + +// MonitoredLog provides information about a log the monitor is following +type MonitoredLog struct { + Config metadata.Log + State State +} + +// State is the latest append-only state the monitor observed from its local +// vantage point. The next entry to download is specified by NextIndex. +type State struct { + ct.SignedTreeHead `json:"latest_sth"` + NextIndex uint64 `json:"next_index"` +} + +// Event carries the latest consistent monitor state, found matches, as well as +// errors that occurred while trying to match on the downloaded log entries. 
+type Event struct { + State State + Matches []LogEntry + Errors []error +} + +func (ev *Event) Summary() string { + return fmt.Sprintf("log %s: tree size %d at next index %d (%d matches, %d errors)", + base64.StdEncoding.EncodeToString(ev.State.LogID[:]), + ev.State.TreeSize, ev.State.NextIndex, len(ev.Matches), len(ev.Errors)) +} + +// LogEntry is a Merkle tree leaf in a log +type LogEntry struct { + LeafIndex uint64 `json:"leaf_index"` + LeafData []byte `json:"leaf_data"` + ExtraData []byte `json:"extra_data"` +} + +type Config struct { + // Optional + Matcher Matcher // Which log entries to match (default is to match all) + Logger logger.Logger // Debug prints only (no output by default) + Contact string // Something that helps log operators get in touch + ChunkSize uint // Min number of leaves to propagate a chunk without matches + BatchSize uint // Max number of certificates to accept per worker + NumWorkers uint // Number of parallel workers to use for each log +} + +type Monitor struct { + cfg Config + matcher Matcher + + eventCh chan Event + configCh chan MonitoredLog + errorCh chan error +} + +func New(cfg Config, evCh chan Event, cfgCh chan MonitoredLog, errCh chan error) (Monitor, error) { + if cfg.Matcher == nil { + cfg.Matcher = &MatchAll{} + } + if !cfg.Logger.IsConfigured() { + cfg.Logger = logger.New(logger.Config{Level: logger.LevelNotice, File: os.Stderr}) + } + if cfg.Contact == "" { + cfg.Contact = "unknown-user" + } + if cfg.ChunkSize == 0 { + cfg.ChunkSize = 256 // FIXME: 16364 + } + if cfg.BatchSize == 0 { + cfg.BatchSize = 1024 + } + if cfg.NumWorkers == 0 { + cfg.NumWorkers = 2 + } + return Monitor{cfg: cfg, matcher: cfg.Matcher, eventCh: evCh, configCh: cfgCh, errorCh: errCh}, nil +} + +func (mon *Monitor) RunOnce(ctx context.Context, cfg []MonitoredLog, evCh chan Event, errCh chan error) error { + return fmt.Errorf("TODO") +} + +func (mon *Monitor) RunForever(ctx context.Context) error { + var wg sync.WaitGroup + defer wg.Wait() + + mctx, cancel := context.WithCancel(ctx) + defer cancel() + + monitoring := make(map[metadata.LogURL]context.CancelFunc) + for { + select { + case <-ctx.Done(): + return nil + case log := <-mon.configCh: + if tcancel, ok := monitoring[log.Config.URL]; ok { + delete(monitoring, log.Config.URL) + tcancel() + continue + } + + newTail := mon.newTailRFC6962 + if log.Config.DNS != nil { // FIXME: get a real knob for tile-based logs + newTail = mon.newTailTile + } + t, err := newTail(log) + if err != nil { + return err + } + + tctx, tcancel := context.WithCancel(mctx) + monitoring[log.Config.URL] = tcancel + + wg.Add(1) + go func(log MonitoredLog, t tail) { + defer wg.Done() + defer tcancel() + t.run(tctx, log, mon.eventCh, mon.errorCh) + }(log, t) + } + } +} + +const userAgentPrefix = "rgdd.se/silent-ct" + +func (mon *Monitor) newTailRFC6962(log MonitoredLog) (tail, error) { + key, err := x509.MarshalPKIXPublicKey(log.Config.Key.Public) + if err != nil { + return tail{}, err + } + cli, err := client.New(string(log.Config.URL), &http.Client{}, jsonclient.Options{ + Logger: &discard{}, + UserAgent: userAgentPrefix + "/" + mon.cfg.Contact, + PublicKeyDER: key, + }) + if err != nil { + return tail{}, err + } + + return tail{cfg: mon.cfg, scanner: cli, checker: cli, matcher: mon.matcher}, nil +} + +func (mon *Monitor) newTailTile(cfg MonitoredLog) (tail, error) { + return tail{}, fmt.Errorf("TODO") +} + +type discard struct{} + +func (n *discard) Printf(string, ...interface{}) {} diff --git a/internal/monitor/tail.go new 
file mode 100644 index 0000000..0e16476 --- /dev/null +++ b/internal/monitor/tail.go @@ -0,0 +1,200 @@ +package monitor + +import ( + "context" + "crypto/sha256" + "fmt" + "sync" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/scanner" + "gitlab.torproject.org/rgdd/ct/pkg/merkle" +) + +type tail struct { + cfg Config + matcher Matcher + scanner scanner.LogClient + checker client.CheckLogClient +} + +func (t *tail) run(ctx context.Context, mon MonitoredLog, eventCh chan Event, errorCh chan error) { + chunkCh := make(chan *chunk) + defer close(chunkCh) + + mctx, cancel := context.WithCancel(ctx) + defer cancel() + + var wg sync.WaitGroup + defer wg.Wait() + + callback := func(eb scanner.EntryBatch) { + c := chunk{startIndex: uint64(eb.Start)} + for i := 0; i < len(eb.Entries); i++ { + c.leafHashes = append(c.leafHashes, merkle.HashLeafNode(eb.Entries[i].LeafInput)) + match, err := t.matcher.Match(eb.Entries[i].LeafInput, eb.Entries[i].ExtraData) + if err != nil { + c.errors = append(c.errors, fmt.Errorf("while processing index %d for %s: %v", i, mon.Config.URL, err)) + continue + } + if !match { + continue + } + + c.matches = append(c.matches, LogEntry{ + LeafIndex: c.startIndex + uint64(i), + LeafData: eb.Entries[i].LeafInput, + ExtraData: eb.Entries[i].ExtraData, + }) + } + + chunkCh <- &c + } + + fetcher := scanner.NewFetcher(t.scanner, &scanner.FetcherOptions{ + BatchSize: int(t.cfg.BatchSize), + StartIndex: int64(mon.State.NextIndex), + ParallelFetch: int(t.cfg.NumWorkers), + Continuous: true, // FIXME: don't set this for read-only log + }) + + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + fetcher.Run(mctx, callback) + }() + + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + t.sequence(mctx, mon, eventCh, errorCh, chunkCh) + }() +} + +func (t *tail) sequence(ctx context.Context, mon MonitoredLog, eventCh chan Event, errorCh chan error, chunkCh chan *chunk) { + state := mon.State + heap := newChunks() + for { + select { + case <-ctx.Done(): + return // FIXME: check if we can pop something before return + case c := <-chunkCh: + heap.push(c) + if heap.gap(state.NextIndex) { + continue + } + c = heap.pop() + if len(c.matches) == 0 && len(c.leafHashes) < int(t.cfg.ChunkSize) { + heap.push(c) + continue // FIXME: don't trigger if we havn't run nextState for too long + } + nextState, err := t.nextState(ctx, state, c) + if err != nil { + errorCh <- err + heap.push(c) + continue + } + + state = nextState + eventCh <- Event{State: state, Matches: c.matches, Errors: c.errors} + } + } +} + +func (t *tail) nextState(ctx context.Context, state State, c *chunk) (State, error) { + newState, err := t.nextConsistentState(ctx, state) + if err != nil { + return State{}, err + } + newState, err = t.nextIncludedState(ctx, newState, c) + if err != nil { + return State{}, err + } + return newState, nil +} + +func (t *tail) nextConsistentState(ctx context.Context, state State) (State, error) { + sth, err := getSignedTreeHead(ctx, t.checker) + if err != nil { + return State{}, fmt.Errorf("%s: get-sth: %v", t.checker.BaseURI(), err) + } + sth.LogID = state.SignedTreeHead.LogID + oldSize := state.TreeSize + oldRoot := state.SHA256RootHash + newSize := sth.TreeSize + newRoot := sth.SHA256RootHash + + proof, err := getConsistencyProof(ctx, t.checker, oldSize, newSize) + if err != nil { + return State{}, fmt.Errorf("%s: get-consistency: %v", t.checker.BaseURI(), err) + } 
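+	// Verify that the monitor's previously verified tree head (oldSize,
+	// oldRoot) is consistent with the tree head just fetched from the log
+	// (newSize, newRoot), i.e., that the log only appended entries since the
+	// recorded state. Only on success is the new STH carried forward.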
+ if err := merkle.VerifyConsistency(oldSize, newSize, oldRoot, newRoot, unslice(proof)); err != nil { + return State{}, fmt.Errorf("%s: verify consistency: %v", t.checker.BaseURI(), err) + } + + return State{SignedTreeHead: *sth, NextIndex: state.NextIndex}, nil +} + +func (t *tail) nextIncludedState(ctx context.Context, state State, c *chunk) (State, error) { + leafHash := c.leafHashes[0] + oldSize := state.NextIndex + uint64(len(c.leafHashes)) + iproof, err := getInclusionProof(ctx, t.checker, leafHash, oldSize) // FIXME: set leaf index in ctx to hack into tile API + if err != nil { + return State{}, fmt.Errorf("%s: get-inclusion: %v", t.checker.BaseURI(), err) + } + if got, want := uint64(iproof.LeafIndex), state.NextIndex; got != want { + return State{}, fmt.Errorf("%s: wrong index for get-inclusion proof query %x:%d", t.checker.BaseURI(), leafHash[:], oldSize) + } + oldRoot, err := merkle.TreeHeadFromRangeProof(c.leafHashes, state.NextIndex, unslice(iproof.AuditPath)) + if err != nil { + return State{}, fmt.Errorf("%s: range proof: %v", t.checker.BaseURI(), err) + } + + newSize := state.TreeSize + newRoot := state.SHA256RootHash + cproof, err := getConsistencyProof(ctx, t.checker, oldSize, newSize) + if err != nil { + return State{}, fmt.Errorf("%s: get-consistency: %v", t.checker.BaseURI(), err) + } + if err := merkle.VerifyConsistency(oldSize, newSize, oldRoot, newRoot, unslice(cproof)); err != nil { + return State{}, fmt.Errorf("%s: verify consistency: %v", t.checker.BaseURI(), err) + } + + state.NextIndex += uint64(len(c.leafHashes)) + return state, nil +} + +func getInclusionProof(ctx context.Context, cli client.CheckLogClient, leafHash [sha256.Size]byte, size uint64) (*ct.GetProofByHashResponse, error) { + rctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return cli.GetProofByHash(rctx, leafHash[:], size) +} + +func getConsistencyProof(ctx context.Context, cli client.CheckLogClient, oldSize, newSize uint64) ([][]byte, error) { + if oldSize == 0 || oldSize >= newSize { + return [][]byte{}, nil + } + rctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return cli.GetSTHConsistency(rctx, oldSize, newSize) +} + +func getSignedTreeHead(ctx context.Context, cli client.CheckLogClient) (*ct.SignedTreeHead, error) { + rctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return cli.GetSTH(rctx) +} + +func unslice(hashes [][]byte) [][sha256.Size]byte { + var ret [][sha256.Size]byte + for _, hash := range hashes { + var h [sha256.Size]byte + copy(h[:], hash) + ret = append(ret, h) + } + return ret +} diff --git a/internal/options/options.go b/internal/options/options.go deleted file mode 100644 index 3e253c5..0000000 --- a/internal/options/options.go +++ /dev/null @@ -1,97 +0,0 @@ -package options - -import ( - "encoding/json" - "flag" - "fmt" - "os" - - "rgdd.se/silent-ct/internal/manager" - "rgdd.se/silent-ct/pkg/monitor" - "rgdd.se/silent-ct/pkg/server" -) - -const usage = `Usage: - - silent-ct [Options] - -Options: - - -h, --help: Output usage message and exit - -c, --config: Path to a configuration file (Default: %s) - -l, --listen: Listen address to receive submission on (Default: %s) - -s, --state: Path to a directory where state is stored (Default: %s) - -Example configuration file: - - { - "monitor": [ - { - "wildcard": "example.org", - "excludes": [ - "test" - ] - } - ], - "nodes": [ - { - "name": "node_a", - "secret": "aaaa", - "issues": [ - "example.org", - "www.example.org" - ] - } - ] - } - -` - -// 
Options are command-line options the user can specify -type Options struct { - ListenAddr string - ConfigFile string - StateDir string -} - -func New(cmd string, args []string) (opts Options, err error) { - fs := flag.NewFlagSet(cmd, flag.ContinueOnError) - fs.Usage = func() { - fmt.Fprintf(os.Stderr, usage, server.DefaultConfigFile, server.DefaultAddress, manager.DefaultStateDir) - } - stringOpt(fs, &opts.ConfigFile, "config", "c", server.DefaultConfigFile) - stringOpt(fs, &opts.ListenAddr, "listen", "l", server.DefaultAddress) - stringOpt(fs, &opts.StateDir, "state", "s", manager.DefaultStateDir) - if err = fs.Parse(args); err != nil { - return opts, err - } - - if opts.ConfigFile == "" { - return opts, fmt.Errorf("-c, --config: must not be an empty string") - } - if opts.StateDir == "" { - return opts, fmt.Errorf("-s, --state: must not be an empty string") - } - if opts.ListenAddr == "" { - return opts, fmt.Errorf("-l, --listen: must not be an empty string") - } - return opts, err -} - -func stringOpt(fs *flag.FlagSet, opt *string, short, long, value string) { - fs.StringVar(opt, short, value, "") - fs.StringVar(opt, long, value, "") -} - -type Config struct { - Monitor monitor.MatchWildcards `json:"monitor"` - Nodes server.Nodes `json:"nodes"` -} - -func (c *Config) FromFile(fileName string) error { - b, err := os.ReadFile(fileName) - if err != nil { - return err - } - return json.Unmarshal(b, c) -} diff --git a/internal/x509util/x509util.go b/internal/x509util/x509util.go deleted file mode 100644 index 912d1b4..0000000 --- a/internal/x509util/x509util.go +++ /dev/null @@ -1,44 +0,0 @@ -package x509util - -import ( - "crypto/x509" - "encoding/pem" - "fmt" -) - -// ParseChain parses a certificate chain in PEM format. At least one -// certificate must be in the chain. The first certificate must be a leaf, -// whereas all other certificates must CA certificates (intermdiates/roots). -// -// Note: it is not checked if the certificate chain's root is trusted or not. 
-func ParseChain(b []byte) ([]x509.Certificate, error) { - var chain []x509.Certificate - - for { - block, rest := pem.Decode(b) - if block == nil { - break - } - crt, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parse certificate: %v", err) - } - - chain = append(chain, *crt) - b = rest - } - - if len(chain) == 0 { - return nil, fmt.Errorf("no certificates in the provided chain") - } - if chain[0].IsCA { - return nil, fmt.Errorf("leaf certificate has the CA bit set") - } - for _, crt := range chain[1:] { - if !crt.IsCA { - return nil, fmt.Errorf("non-leaf certificate without the CA bit set") - } - } - - return chain, nil -} diff --git a/main.go b/main.go deleted file mode 100644 index 14c1b71..0000000 --- a/main.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "context" - "errors" - "flag" - "fmt" - "os" - "os/signal" - "sync" - "syscall" - - "rgdd.se/silent-ct/internal/manager" - "rgdd.se/silent-ct/internal/options" - "rgdd.se/silent-ct/pkg/monitor" - "rgdd.se/silent-ct/pkg/server" -) - -func main() { - opts, err := options.New(os.Args[0], os.Args[1:]) - if err != nil { - if errors.Is(err, flag.ErrHelp) { - os.Exit(0) - } - die("options: %v", err) - } - var c options.Config - if err := c.FromFile(opts.ConfigFile); err != nil { - die("configuration: %v", err) - } - fmt.Printf("%v\n", c) - - srv, err := server.New(server.Config{Address: opts.ListenAddr, Nodes: c.Nodes}) - if err != nil { - die("create new server: %v", err) - } - mon, err := monitor.New(monitor.Config{Callback: &c.Monitor}) - if err != nil { - die("create new monitor: %v", err) - } - mgr, err := manager.New(manager.Config{Nodes: c.Nodes}) - if err != nil { - die("create new manager: %v", err) - } - - configCh := make(chan []monitor.MessageLogConfig) - defer close(configCh) - - progressCh := make(chan monitor.MessageLogProgress) - defer close(progressCh) - - submitCh := make(chan server.MessageNodeSubmission) - defer close(submitCh) - - errorCh := make(chan error) - defer close(errorCh) - - ctx, cancel := context.WithCancel(context.Background()) - var wg sync.WaitGroup - defer wg.Wait() - - wg.Add(1) - go func() { - defer wg.Done() - defer cancel() - await(ctx) - }() - - wg.Add(1) - go func() { - defer wg.Done() - defer cancel() - if err := srv.Run(ctx, submitCh, errorCh); err != nil { - die("server: %v\n", err) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - defer cancel() - mon.Run(ctx, configCh, progressCh, errorCh) - }() - - wg.Add(1) - go func() { - defer wg.Done() - defer cancel() - if err := mgr.Run(ctx, submitCh, progressCh, configCh, errorCh); err != nil { - die("manager: %v\n", err) - } - }() -} - -func await(ctx context.Context) { - sigs := make(chan os.Signal, 1) - defer close(sigs) - - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - select { - case <-sigs: - case <-ctx.Done(): - } -} - -func die(format string, args ...interface{}) { - fmt.Printf("fatal: "+format, args) - os.Exit(1) -} diff --git a/pkg/crtutil/crt_util.go b/pkg/crtutil/crt_util.go new file mode 100644 index 0000000..11bcd7e --- /dev/null +++ b/pkg/crtutil/crt_util.go @@ -0,0 +1,71 @@ +// Package crtutil provides utility functions for working with certificates. +package crtutil + +import ( + "crypto/sha256" + "encoding/pem" + "fmt" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" +) + +// CertificateChainFromPEM parses a certificate chain in PEM format. At least +// one certificate must be in the chain. 
The first certificate must be a leaf, +// whereas all other certificates must CA certificates (intermdiates/roots). +func CertificateChainFromPEM(b []byte) ([]x509.Certificate, error) { + var chain []x509.Certificate + + for { + block, rest := pem.Decode(b) + if block == nil { + break + } + crt, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parse certificate: %v", err) + } + + chain = append(chain, *crt) + b = rest + } + + if len(chain) == 0 { + return nil, fmt.Errorf("no certificates in the provided chain") + } + if chain[0].IsCA { + return nil, fmt.Errorf("leaf certificate has the CA bit set") + } + for _, crt := range chain[1:] { + if !crt.IsCA { + return nil, fmt.Errorf("non-leaf certificate without the CA bit set") + } + } + + return chain, nil +} + +// CertificateFromLogEntry parses the (pre-)certificate in a log entry +func CertificateFromLogEntry(leafData, extraData []byte) (x509.Certificate, error) { + entry, err := ct.LogEntryFromLeaf(0, &ct.LeafEntry{LeafInput: leafData, ExtraData: extraData}) + if err != nil { + return x509.Certificate{}, fmt.Errorf("parse leaf: %v", err) + } + if entry.Precert == nil && entry.X509Cert == nil { + return x509.Certificate{}, fmt.Errorf("neither precertificate nor certificate in leaf") + } + if entry.Precert != nil && entry.X509Cert != nil { + return x509.Certificate{}, fmt.Errorf("both certificate and precertificate in leaf") + } + if entry.Precert != nil { + return *entry.Precert.TBSCertificate, nil + } + return *entry.X509Cert, nil +} + +// UniqueID derives a unique certificate ID. The same value is derived +// regardless of if the (pre-)certificate is logged multiple times. +func UniqueID(crt x509.Certificate) string { + h := sha256.Sum256([]byte(crt.SerialNumber.String())) + return fmt.Sprintf("FIXME:%x", h[:]) // not a secure mapping +} diff --git a/pkg/monitor/chunks.go b/pkg/monitor/chunks.go deleted file mode 100644 index 87871b9..0000000 --- a/pkg/monitor/chunks.go +++ /dev/null @@ -1,88 +0,0 @@ -package monitor - -// -// A min heap of chunks, oredered on each chunk's start index. -// -// Credit: inspiration to use a heap from Aaron Gable, see -// https://github.com/aarongable/ctaudit -// - -import ( - "container/heap" - "crypto/sha256" -) - -type chunk struct { - startIndex uint64 // Index of the first leaf - leafHashes [][sha256.Size]byte // List of consecutive leaf hashes - matches []LogEntry // Leaves that matches some criteria - errors []error // Errors that ocurred while parsing leaves -} - -type chunks []*chunk - -func newChunks() *chunks { - var h chunks - heap.Init((*internal)(&h)) - return &h -} - -func (h *chunks) push(c *chunk) { - heap.Push((*internal)(h), c) -} - -func (h *chunks) pop() *chunk { - x := heap.Pop((*internal)(h)) - return x.(*chunk) -} - -// gap returns true if there's a gap between the provided start index and the -// top most chunk. If the top most chunk is in sequence, it is merged with -// any following chunks that are also in sequence to form one larger chunk. -func (h *chunks) gap(start uint64) bool { - if len(*h) == 0 { - return true - } - - top := h.pop() - if start != top.startIndex { - h.push(top) - return true - } - - for len(*h) > 0 { - c := h.pop() - if c.startIndex != top.startIndex+uint64(len(top.leafHashes)) { - h.push(c) - break - } - - top.leafHashes = append(top.leafHashes, c.leafHashes...) - top.matches = append(top.matches, c.matches...) - top.errors = append(top.errors, c.errors...) 
- } - - h.push(top) - return false -} - -// internal implements the heap interface, see example: -// https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/container/heap/example_intheap_test.go -type internal chunks - -func (h internal) Len() int { return len(h) } -func (h internal) Less(i, j int) bool { return h[i].startIndex < h[j].startIndex } -func (h internal) Swap(i, j int) { h[i], h[j] = h[j], h[i] } - -func (h *internal) Push(x any) { - *h = append(*h, x.(*chunk)) -} - -func (h *internal) Pop() any { - old := *h - n := len(old) - x := old[n-1] - old[n-1] = nil // avoid memory leak - *h = old[:n-1] - return x -} diff --git a/pkg/monitor/errors.go b/pkg/monitor/errors.go deleted file mode 100644 index 4d676af..0000000 --- a/pkg/monitor/errors.go +++ /dev/null @@ -1,41 +0,0 @@ -package monitor - -import ( - "fmt" - - ct "github.com/google/certificate-transparency-go" -) - -// ErrorFetch occurs if there's a problem hitting the log's HTTP API. An STH is -// provided if available, since it might carry evidence of some log misbehavior. -type ErrorFetch struct { - URL string - Msg string - Err error - STH *ct.SignedTreeHead -} - -func (e ErrorFetch) Error() string { - return fmt.Sprintf("%s: %s: %v", e.URL, e.Msg, e.Err) -} - -// ErrorMerkleTree occurs if the log's Merkle tree can't be verified. An STH is -// provided if available (i.e., won't be available for internal tree building). -type ErrorMerkleTree struct { - URL string - Msg string - Err error - STH *ct.SignedTreeHead -} - -func (e ErrorMerkleTree) Error() string { - return fmt.Sprintf("%s: %s: %v", e.URL, e.Msg, e.Err) -} - -// TODO: MMD violations -// TODO: Growing read-only logs - -// noout implements the Logger interface to discard unwanted output -type noout struct{} - -func (n *noout) Printf(string, ...interface{}) {} diff --git a/pkg/monitor/matcher.go b/pkg/monitor/matcher.go deleted file mode 100644 index fa3a894..0000000 --- a/pkg/monitor/matcher.go +++ /dev/null @@ -1,90 +0,0 @@ -package monitor - -import ( - "fmt" - "strings" - - ct "github.com/google/certificate-transparency-go" -) - -type Matcher interface { - Match(leafInput, extraData []byte) (bool, error) -} - -// MatchAll matches all certificates -type MatchAll struct{} - -func (m *MatchAll) Match(leafInput, extraData []byte) (bool, error) { - return true, nil -} - -// MatchWildcards matches a list of wildcards, see the MatchWildcard type -type MatchWildcards []MatchWildcard - -func (m *MatchWildcards) Match(leafInput, extraData []byte) (bool, error) { - sans, err := getSANs(ct.LeafEntry{LeafInput: leafInput, ExtraData: extraData}) - if err != nil { - return false, err - } - return m.match(sans), nil -} - -func (m *MatchWildcards) match(sans []string) bool { - for _, mw := range (*m)[:] { - if mw.match(sans) { - return true - } - } - return false -} - -// MatchWildcard exclude matches for `.*\.`, but will -// otherwise match on any `.*\.` as well as SANs equal to . -// -// For example, let be example.org and Exclude be [foo, bar]. Then -// example.org and www.example.org would match, whereas foo.example.org, -// sub.foo.example.org, and bar.example.org. would not match. 
-type MatchWildcard struct { - Wildcard string `json:"wildcard"` - Excludes []string `json:"excludes"` -} - -func (m *MatchWildcard) match(sans []string) bool { - for _, san := range sans { - if san == m.Wildcard { - return true - } - if strings.HasSuffix(san, "."+m.Wildcard) && !m.exclude(san) { - return true - } - } - return false -} - -func (m *MatchWildcard) exclude(san string) bool { - for _, exclude := range m.Excludes { - suffix := exclude + "." + m.Wildcard - if strings.HasSuffix(san, suffix) { - return true - } - } - return false -} - -func getSANs(entry ct.LeafEntry) ([]string, error) { - // Warning: here be dragons, parsing of DNS names in certificates... - e, err := ct.LogEntryFromLeaf(0, &entry) - if err != nil { - return nil, fmt.Errorf("parse leaf: %v", err) - } - if e.Precert == nil && e.X509Cert == nil { - return nil, fmt.Errorf("neither precertificate nor certificate in leaf") - } - if e.Precert != nil && e.X509Cert != nil { - return nil, fmt.Errorf("both certificate and precertificate in leaf") - } - if e.Precert != nil { - return e.Precert.TBSCertificate.DNSNames, nil - } - return e.X509Cert.DNSNames, nil -} diff --git a/pkg/monitor/messages.go b/pkg/monitor/messages.go deleted file mode 100644 index 717aae6..0000000 --- a/pkg/monitor/messages.go +++ /dev/null @@ -1,40 +0,0 @@ -package monitor - -import ( - ct "github.com/google/certificate-transparency-go" - "gitlab.torproject.org/rgdd/ct/pkg/metadata" -) - -// MessageLogConfig provides information about a log the monitor is downloading -type MessageLogConfig struct { - Metadata metadata.Log - State MonitorState -} - -// MessageLogProgress is the next log state and any encountered leaves that were -// considered matching since the previous log state. Parse errors are included. -type MessageLogProgress struct { - State MonitorState - Matches []LogEntry - Errors []error -} - -// MonitorState describes the monitor's state for a particular log. The signed tree -// head is the latest verified append-only state that was observed. The index -// is the next leaf which will be downloaded and processed by the monitor. -type MonitorState struct { - LogState - NextIndex uint64 -} - -// LogState describes the state of a log -type LogState struct { - ct.SignedTreeHead -} - -// LogEntry is a leaf in a log's Merkle tree -type LogEntry struct { - LeafIndex uint64 - LeafData []byte - ExtraData []byte -} diff --git a/pkg/monitor/monitor.go b/pkg/monitor/monitor.go deleted file mode 100644 index 5f7a629..0000000 --- a/pkg/monitor/monitor.go +++ /dev/null @@ -1,286 +0,0 @@ -// Package monitor provides a Certificate Transparency monitor that tails a list -// of logs which can be updated dynamically while running. All emitted progress -// messages have been verified by the monitor to be included in the log's -// append-only Merkle tree with regard to the initial start-up state. It is up -// to the user to process the monitor's progress, errors, and persist state. -// -// Implement the Matcher interface to customize which certificates should be -// included in a log's progress messages, or use any of the existing matchers -// provided by this package (see for example MatchAll and MatchWildcards). 
-package monitor - -import ( - "context" - "crypto/sha256" - "fmt" - "net/http" - "sync" - "time" - - ct "github.com/google/certificate-transparency-go" - "github.com/google/certificate-transparency-go/client" - "github.com/google/certificate-transparency-go/jsonclient" - "github.com/google/certificate-transparency-go/scanner" - "rgdd.se/silent-ct/internal/merkle" -) - -const ( - UserAgentPrefix = "rgdd.se/silent-ct" - DefaultContact = "unknown-user" - DefaultChunkSize = 256 // TODO: increase me - DefaultBatchSize = 128 // TODO: increase me - DefaultNumWorkers = 2 -) - -type Config struct { - Contact string // Something that help log operators get in touch - ChunkSize int // Min number of leaves to propagate a chunk without matches - BatchSize int // Max number of certificates to accept per worker - NumWorkers int // Number of parallel workers to use for each log - - // Callback determines which certificates are interesting to detect - Callback Matcher -} - -type Monitor struct { - Config -} - -func New(cfg Config) (Monitor, error) { - if cfg.Contact == "" { - cfg.Contact = "unknown-user" - } - if cfg.ChunkSize <= 0 { - cfg.ChunkSize = DefaultChunkSize - } - if cfg.BatchSize <= 0 { - cfg.BatchSize = DefaultBatchSize - } - if cfg.NumWorkers <= 0 { - cfg.NumWorkers = DefaultNumWorkers - } - if cfg.Callback == nil { - cfg.Callback = &MatchAll{} - } - return Monitor{Config: cfg}, nil -} - -func (mon *Monitor) Run(ctx context.Context, metadataCh chan []MessageLogConfig, eventCh chan MessageLogProgress, errorCh chan error) { - var wg sync.WaitGroup - var sctx context.Context - var cancel context.CancelFunc - - for { - select { - case <-ctx.Done(): - return - case metadata := <-metadataCh: - fmt.Printf("DEBUG: received new list with %d logs\n", len(metadata)) - if cancel != nil { - fmt.Printf("DEBUG: stopping all log tailers\n") - cancel() - wg.Wait() - } - - sctx, cancel = context.WithCancel(ctx) - for _, md := range metadata { - fmt.Printf("DEBUG: starting log tailer %s\n", md.Metadata.URL) - wg.Add(1) - go func(lcfg MessageLogConfig) { - defer wg.Done() - - opts := jsonclient.Options{Logger: &noout{}, UserAgent: UserAgentPrefix + ":" + mon.Contact} - cli, err := client.New(string(lcfg.Metadata.URL), &http.Client{}, opts) - if err != nil { - errorCh <- fmt.Errorf("unable to configure %s: %v", lcfg.Metadata.URL, err) - return - } - - chunkCh := make(chan *chunk) - defer close(chunkCh) - - t := tail{mon.Config, *cli, chunkCh, eventCh, errorCh} - if err := t.run(sctx, lcfg.State); err != nil { - errorCh <- fmt.Errorf("unable to continue tailing %s: %v", lcfg.Metadata.URL, err) - } - }(md) - } - } - } -} - -type tail struct { - mcfg Config - cli client.LogClient - - chunkCh chan *chunk - eventCh chan MessageLogProgress - errorCh chan error -} - -func (t *tail) run(ctx context.Context, state MonitorState) error { - var wg sync.WaitGroup - defer wg.Wait() - - mctx, cancel := context.WithCancel(ctx) - defer cancel() - - wg.Add(1) - go func() { - defer wg.Done() - defer cancel() - t.sequence(mctx, state) - }() - - fetcher := scanner.NewFetcher(&t.cli, &scanner.FetcherOptions{ - BatchSize: t.mcfg.BatchSize, - StartIndex: int64(state.NextIndex), - ParallelFetch: t.mcfg.NumWorkers, - Continuous: true, - }) - callback := func(eb scanner.EntryBatch) { - c := chunk{startIndex: uint64(eb.Start)} - for i := 0; i < len(eb.Entries); i++ { - c.leafHashes = append(c.leafHashes, merkle.HashLeafNode(eb.Entries[i].LeafInput)) - match, err := t.mcfg.Callback.Match(eb.Entries[i].LeafInput, eb.Entries[i].ExtraData) - if 
err != nil { - c.errors = append(c.errors, fmt.Errorf("while processing index %d for %s: %v", i, t.cli.BaseURI(), err)) - } else if match { - c.matches = append(c.matches, LogEntry{ - LeafIndex: uint64(i), - LeafData: eb.Entries[i].LeafInput, - ExtraData: eb.Entries[i].ExtraData, - }) - } - } - t.chunkCh <- &c - } - return fetcher.Run(mctx, callback) -} - -func (t *tail) sequence(ctx context.Context, state MonitorState) { - heap := newChunks() - for { - select { - case <-ctx.Done(): - return - case c := <-t.chunkCh: - heap.push(c) - if heap.gap(state.NextIndex) { - continue - } - c = heap.pop() - if len(c.matches) == 0 && len(c.leafHashes) < t.mcfg.ChunkSize { - heap.push(c) - continue // TODO: don't trigger if we havn't run nextState for too long - } - nextState, err := t.nextState(ctx, state, c) - if err != nil { - t.errorCh <- err - heap.push(c) - continue - } - - state = nextState - t.eventCh <- MessageLogProgress{State: state, Matches: c.matches, Errors: c.errors} - } - } -} - -func (t *tail) nextState(ctx context.Context, state MonitorState, c *chunk) (MonitorState, error) { - newState, err := t.nextConsistentState(ctx, state) - if err != nil { - return MonitorState{}, err - } - newState, err = t.nextIncludedState(ctx, state, c) - if err != nil { - return MonitorState{}, err - } - return newState, nil -} - -func (t *tail) nextConsistentState(ctx context.Context, state MonitorState) (MonitorState, error) { - sth, err := getSignedTreeHead(ctx, &t.cli) - if err != nil { - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-sth", Err: err} - } - oldSize := state.TreeSize - oldRoot := state.SHA256RootHash - newSize := sth.TreeSize - newRoot := sth.SHA256RootHash - - proof, err := getConsistencyProof(ctx, &t.cli, oldSize, newSize) - if err != nil { - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-consistency", STH: sth, Err: err} - } - if err := merkle.VerifyConsistency(oldSize, newSize, oldRoot, newRoot, unslice(proof)); err != nil { - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "consistency", STH: sth, Err: err} - } - - fmt.Printf("DEBUG: consistently updated STH from size %d to %d\n", oldSize, newSize) - return MonitorState{LogState: LogState{*sth}, NextIndex: state.NextIndex}, nil -} - -func (t *tail) nextIncludedState(ctx context.Context, state MonitorState, c *chunk) (MonitorState, error) { - leafHash := c.leafHashes[0] - oldSize := state.NextIndex + uint64(len(c.leafHashes)) - iproof, err := getInclusionProof(ctx, &t.cli, leafHash, oldSize) - if err != nil { - err = fmt.Errorf("leaf hash %x and tree size %d: %v", leafHash[:], oldSize, err) - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-inclusion", Err: err} - } - if got, want := uint64(iproof.LeafIndex), state.NextIndex; got != want { - err := fmt.Errorf("leaf hash %x and tree size %d: expected leaf index %d but got %d", leafHash[:], oldSize, got, want) - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "proof-index", Err: err} - } - oldRoot, err := merkle.TreeHeadFromRangeProof(c.leafHashes, state.NextIndex, unslice(iproof.AuditPath)) - if err != nil { - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "inclusion", Err: err} - } - - newSize := state.TreeSize - newRoot := state.SHA256RootHash - cproof, err := getConsistencyProof(ctx, &t.cli, oldSize, newSize) - if err != nil { - err = fmt.Errorf("from size %d to %d: %v", oldSize, newSize, err) - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-consistency", 
Err: err} - } - if err := merkle.VerifyConsistency(oldSize, newSize, oldRoot, newRoot, unslice(cproof)); err != nil { - err = fmt.Errorf("from size %d to %d: %v", oldSize, newSize, err) - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "consistency", Err: err} - } - - state.NextIndex += uint64(len(c.leafHashes)) - return state, nil -} - -func getInclusionProof(ctx context.Context, cli *client.LogClient, leafHash [sha256.Size]byte, size uint64) (*ct.GetProofByHashResponse, error) { - rctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - return cli.GetProofByHash(rctx, leafHash[:], size) -} - -func getConsistencyProof(ctx context.Context, cli *client.LogClient, oldSize, newSize uint64) ([][]byte, error) { - if oldSize == 0 || oldSize >= newSize { - return [][]byte{}, nil - } - rctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - return cli.GetSTHConsistency(rctx, oldSize, newSize) -} - -func getSignedTreeHead(ctx context.Context, cli *client.LogClient) (*ct.SignedTreeHead, error) { - rctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - return cli.GetSTH(rctx) -} - -func unslice(hashes [][]byte) [][sha256.Size]byte { - var ret [][sha256.Size]byte - for _, hash := range hashes { - var h [sha256.Size]byte - copy(h[:], hash) - ret = append(ret, h) - } - return ret -} diff --git a/pkg/policy/node.go b/pkg/policy/node.go new file mode 100644 index 0000000..23f04ca --- /dev/null +++ b/pkg/policy/node.go @@ -0,0 +1,93 @@ +package policy + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + + "golang.org/x/crypto/hkdf" +) + +type Node struct { + Name string `json:"name"` // Artbirary node name to authenticate + Secret string `json:"secret"` // Arbitrary node secret for authentication + URL string `json:"url"` // Where the node's submissions can be downloaded + Domains []string `json:"issues"` // Exact-match domain names allowed to be issued + + key [16]byte +} + +func NewNode(name, secret, url string, domains []string) (Node, error) { + n := Node{Name: name, Secret: secret, Domains: domains, URL: url} + if err := n.deriveKey(); err != nil { + return Node{}, err + } + return n, n.Validate() +} + +func (n *Node) UnmarshalJSON(data []byte) error { + type internal Node + if err := json.Unmarshal(data, (*internal)(n)); err != nil { + return err + } + if err := n.deriveKey(); err != nil { + return err + } + return n.Validate() +} + +func (n *Node) Validate() error { + if n.Name == "" { + return fmt.Errorf("name is required") + } + if n.Secret == "" { + return fmt.Errorf("secret is required") + } + if n.URL == "" { + return fmt.Errorf("url is required") + } + if n.key == [16]byte{} { + return fmt.Errorf("key needs to be derived") + } + return nil +} + +func (n *Node) Authorize(sans []string) error { + for _, san := range sans { + ok := false + for _, domain := range n.Domains { + if domain == san { + ok = true + break + } + } + + if !ok { + return fmt.Errorf("node %s is not authorized to issue certificate with name %s", n.Name, san) + } + } + return nil +} + +func (n *Node) HMAC(data []byte) (mac [sha256.Size]byte, err error) { + if err = n.Validate(); err != nil { + return + } + + h := hmac.New(sha256.New, n.key[:]) + _, err = h.Write(data) + + copy(mac[:], h.Sum(nil)) + return +} + +func (n *Node) deriveKey() error { + const salt = "silent-ct" + + hkdf := hkdf.New(sha256.New, []byte(n.Secret), []byte(salt), []byte(n.Name)) + _, err := io.ReadFull(hkdf, n.key[:]) + + return err +} diff --git 
a/pkg/policy/policy.go b/pkg/policy/policy.go new file mode 100644 index 0000000..8ee4867 --- /dev/null +++ b/pkg/policy/policy.go @@ -0,0 +1,18 @@ +// Package policy specifies which certificates to look for while monitoring, and +// how to pull legitimately issued certificates from trusted nodes based on a +// shared secret. Statically configured logs can also be specified, as well as +// logs that should not be monitored even if they appear in any dynamic list. +package policy + +import ( + "gitlab.torproject.org/rgdd/ct/pkg/metadata" +) + +type Policy struct { + Monitor Wildcards `json:"monitor"` + Nodes []Node `json:"nodes"` + + // Optional + StaticLogs []metadata.Log `json:"static_logs"` + RemoveLogs []metadata.LogKey `json:"remove_logs"` +} diff --git a/pkg/policy/wildcard.go b/pkg/policy/wildcard.go new file mode 100644 index 0000000..58b0d17 --- /dev/null +++ b/pkg/policy/wildcard.go @@ -0,0 +1,83 @@ +package policy + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "rgdd.se/silent-ct/pkg/crtutil" +) + +// Wildcards implement the monitor.Matcher interface for a list of wildcards. +// +// Warning: parsing of SANs in certificates is hard. This matcher depends on +// the parsing defined in github.com/google/certificate-transparency-go/x509. +type Wildcards []Wildcard + +func (w *Wildcards) Match(leafData, extraData []byte) (bool, error) { + crt, err := crtutil.CertificateFromLogEntry(leafData, extraData) + if err != nil { + return false, err + } + return w.match(crt.DNSNames, crt.NotAfter), nil +} + +func (w *Wildcards) match(sans []string, notAfter time.Time) bool { + for _, wildcard := range *w { + if wildcard.Match(sans, notAfter) { + return true + } + } + return false +} + +// Wildcard matches any string that ends with `Wildcard`, unless: +// +// 1. `Excludes[i] + "." + Wildcard` is a longer suffix match, or +// 2. the certificate expired before the BootstrapAt timestamp. +type Wildcard struct { + BootstrapAt time.Time `json:"bootstrap_at"` + Wildcard string `json:"wildcard"` + Excludes []string `json:"excludes",omitempty"` +} + +func (w *Wildcard) UnmarshalJSON(data []byte) error { + type internal Wildcard + if err := json.Unmarshal(data, (*internal)(w)); err != nil { + return err + } + return w.Validate() +} + +func (w *Wildcard) Validate() error { + if w.BootstrapAt.IsZero() { + return fmt.Errorf("bootstrap time is required") + } + if len(w.Wildcard) == 0 { + return fmt.Errorf("wildcard is required") + } + return nil +} + +func (w *Wildcard) Match(sans []string, expiresAt time.Time) bool { + for _, san := range sans { + if san == w.Wildcard { + return w.BootstrapAt.Before(expiresAt) + } + if strings.HasSuffix(san, "."+w.Wildcard) && !w.exclude(san) { + return w.BootstrapAt.Before(expiresAt) + } + } + return false +} + +func (w *Wildcard) exclude(san string) bool { + for _, exclude := range w.Excludes { + suffix := exclude + "." 
+ w.Wildcard + if strings.HasSuffix(san, suffix) { + return true + } + } + return false +} diff --git a/pkg/server/handler.go b/pkg/server/handler.go deleted file mode 100644 index 6d17af7..0000000 --- a/pkg/server/handler.go +++ /dev/null @@ -1,32 +0,0 @@ -package server - -import ( - "fmt" - "net/http" -) - -// handler implements the http.Handler interface -type handler struct { - method string - endpoint string - callback func(w http.ResponseWriter, r *http.Request) (int, string) -} - -func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Header().Set("Cache-Control", "no-cache") - if r.Method != h.method { - http.Error(w, "Invalid HTTP method, expected "+h.method, http.StatusMethodNotAllowed) - return - } - code, text := h.callback(w, r) - if code != http.StatusOK { - http.Error(w, text, code) - return - } - fmt.Fprintf(w, fmt.Sprintf("%s\n", text)) -} - -func (h handler) register(mux *http.ServeMux) { - mux.Handle("/"+h.endpoint, h) -} diff --git a/pkg/server/messages.go b/pkg/server/messages.go deleted file mode 100644 index a6ea243..0000000 --- a/pkg/server/messages.go +++ /dev/null @@ -1,23 +0,0 @@ -package server - -import ( - "fmt" - "time" -) - -type MessageNodeSubmission struct { - SerialNumber string - NotBefore time.Time - DomainNames []string - PEMChain []byte -} - -type ErrorUnauthorizedDomainName struct { - PEMChain []byte - Node Node - Err error -} - -func (e ErrorUnauthorizedDomainName) Error() string { - return fmt.Sprintf("%v", e.Err) -} diff --git a/pkg/server/nodes.go b/pkg/server/nodes.go deleted file mode 100644 index 164c06f..0000000 --- a/pkg/server/nodes.go +++ /dev/null @@ -1,53 +0,0 @@ -package server - -import ( - "crypto/x509" - "fmt" - "net/http" -) - -// Node is an identified system that can request certificates -type Node struct { - Name string `json:"name"` // Artbirary node name for authentication - Secret string `json:"secret"` // Arbitrary node secret for authentication - Domains []string `json:"issues"` // Exact-match domain names that are allowed -} - -func (node *Node) authenticate(r *http.Request) error { - user, password, ok := r.BasicAuth() - if !ok { - return fmt.Errorf("no http basic auth credentials") - } - if user != node.Name || password != node.Secret { - return fmt.Errorf("invalid http basic auth credentials") - } - return nil -} - -func (node *Node) check(crt x509.Certificate) error { - for _, san := range crt.DNSNames { - ok := false - for _, domain := range node.Domains { - if domain == san { - ok = true - break - } - } - if !ok { - return fmt.Errorf("%s: not authorized to issue certificates for %s", node.Name, san) - } - } - return nil -} - -// Nodes is a list of nodes that can request certificates -type Nodes []Node - -func (nodes *Nodes) authenticate(r *http.Request) (Node, error) { - for _, node := range (*nodes)[:] { - if err := node.authenticate(r); err == nil { - return node, nil - } - } - return Node{}, fmt.Errorf("no valid HTTP basic auth credentials") -} diff --git a/pkg/server/server.go b/pkg/server/server.go deleted file mode 100644 index 06eb258..0000000 --- a/pkg/server/server.go +++ /dev/null @@ -1,133 +0,0 @@ -package server - -import ( - "context" - "fmt" - "io" - "net" - "net/http" - "time" - - "rgdd.se/silent-ct/internal/x509util" -) - -const ( - EndpointAddChain = "add-chain" - EndpointGetStatus = "get-status" - - DefaultNetwork = "tcp" - DefaultAddress = "localhost:2009" - DefaultConfigFile = "/home/rgdd/.config/silent-ct/config.json" 
// FIXME -) - -type Config struct { - Network string // tcp or unix - Address string // hostname[:port] or path to a unix socket - Nodes Nodes // Which nodes are trusted to issue what certificates -} - -type Server struct { - Config - - eventCh chan MessageNodeSubmission - errorCh chan error -} - -func New(cfg Config) (Server, error) { - if cfg.Network == "" { - cfg.Network = DefaultNetwork - } - if cfg.Address == "" { - cfg.Network = DefaultAddress - } - return Server{Config: cfg}, nil -} - -func (srv *Server) Run(ctx context.Context, submitCh chan MessageNodeSubmission, errorCh chan error) error { - srv.eventCh = submitCh - srv.errorCh = errorCh - mux := http.NewServeMux() - for _, handler := range srv.handlers() { - handler.register(mux) - } - - listener, err := net.Listen(srv.Network, srv.Address) - if err != nil { - return fmt.Errorf("listen: %v", err) - } - defer listener.Close() - - s := http.Server{Handler: mux} - exitCh := make(chan error, 1) - defer close(exitCh) - go func() { - exitCh <- s.Serve(listener) - }() - - select { - case err := <-exitCh: - if err != nil && err != http.ErrServerClosed { - return fmt.Errorf("serve: %v", err) - } - case <-ctx.Done(): - tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := s.Shutdown(tctx); err != nil { - return fmt.Errorf("shutdown: %v", err) - } - } - return nil -} - -func (srv *Server) handlers() []handler { - return []handler{ - handler{ - method: http.MethodGet, - endpoint: EndpointGetStatus, - callback: func(w http.ResponseWriter, r *http.Request) (int, string) { return srv.getStatus(w, r) }, - }, - handler{ - method: http.MethodPost, - endpoint: EndpointAddChain, - callback: func(w http.ResponseWriter, r *http.Request) (int, string) { return srv.addChain(w, r) }, - }, - } -} - -func (srv *Server) getStatus(w http.ResponseWriter, r *http.Request) (int, string) { - return http.StatusOK, "OK" -} - -func (srv *Server) addChain(w http.ResponseWriter, r *http.Request) (int, string) { - node, err := srv.Nodes.authenticate(r) - if err != nil { - return http.StatusForbidden, "Invalid HTTP Basic Auth credentials" - } - - b, err := io.ReadAll(r.Body) - if err != nil { - return http.StatusBadRequest, "Read HTTP POST body failed" - } - defer r.Body.Close() - - chain, err := x509util.ParseChain(b) - if err != nil { - return http.StatusBadRequest, "Malformed HTTP POST body" - } - if err := node.check(chain[0]); err != nil { - srv.errorCh <- ErrorUnauthorizedDomainName{ - PEMChain: b, - Node: node, - Err: err, - } - } else { - srv.eventCh <- MessageNodeSubmission{ - SerialNumber: chain[0].SerialNumber.String(), - NotBefore: chain[0].NotBefore, - DomainNames: chain[0].DNSNames, - PEMChain: b, - } - } - - return http.StatusOK, "OK" -} diff --git a/pkg/storage/errors.go b/pkg/storage/errors.go new file mode 100644 index 0000000..3d7997f --- /dev/null +++ b/pkg/storage/errors.go @@ -0,0 +1,5 @@ +package storage + +import "errors" + +var ErrorMonitorStateExists = errors.New("monitor state already exists on disk") diff --git a/pkg/storage/index/index.go b/pkg/storage/index/index.go new file mode 100644 index 0000000..ef9ad60 --- /dev/null +++ b/pkg/storage/index/index.go @@ -0,0 +1,103 @@ +// Package index provides an index of locally stored certificates. If a method +// succeeds, the index and the data that it tracks has been persisted to disk. +// If a method does not succeed, restore from the persisted index on disk. 
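+//
+// Certificates are tracked in three buckets: pending (observed in a log but
+// not yet matched by any node's legitimately issued certificates), legitimate
+// (matched by a node's certificates), and alerting (pending for longer than
+// the configured alert delay).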
+package index + +import ( + "crypto/sha256" + "fmt" + "time" + + "rgdd.se/silent-ct/internal/ioutil" + "rgdd.se/silent-ct/internal/monitor" + "rgdd.se/silent-ct/pkg/crtutil" +) + +type Config struct { + PermitBootstrap bool // Create a new index if a valid one does not exist on disk yet + IndexFile string // Path to an index file that can be read/written + TrustDirectory string // Absolute path to an existing directory where legitimate certificates are stored + MatchDirectory string // Absolute path to an existing directory where matching certificates are stored + + // Optional + AlertDelay time.Duration // Time before alerting on certificates that are unaccounted for +} + +type Index struct { + mem index + cfg Config +} + +func New(cfg Config) (Index, error) { + ix := Index{cfg: cfg} + if err := ioutil.DirectoriesExist([]string{cfg.TrustDirectory, cfg.MatchDirectory}); err != nil { + return Index{}, err + } + if err := ioutil.ReadJSON(cfg.IndexFile, &ix.mem); err != nil { + if !cfg.PermitBootstrap { + return Index{}, err + } + + ix.mem = newIndex() + if err := ioutil.CommitJSON(cfg.IndexFile, ix.mem); err != nil { + return Index{}, err + } + } + return ix, ix.Validate() +} + +func (ix *Index) AddChain(node string, pem []byte) error { + chain, err := crtutil.CertificateChainFromPEM(pem) + if err != nil { + return err + } + + var crtID CertificateID + crtID.Set(chain[0]) + path := fmt.Sprintf("%s/%s-%s.pem", ix.cfg.TrustDirectory, node, crtID) + if !ix.mem.addChain(crtID, path) { + return nil // duplicate + } + + if ioutil.CommitData(path, pem); err != nil { + return err + } + return ioutil.CommitJSON(ix.cfg.IndexFile, ix.mem) +} + +func (ix *Index) AddEntries(logID [sha256.Size]byte, entries []monitor.LogEntry) error { + addEntry := func(entry monitor.LogEntry) error { + crt, err := crtutil.CertificateFromLogEntry(entry.LeafData, entry.ExtraData) + if err != nil { + return err + } + + var crtID CertificateID + crtID.Set(crt) + path := fmt.Sprintf("%s/%x-%d.json", ix.cfg.MatchDirectory, logID[:], entry.LeafIndex) + if !ix.mem.addEntry(crtID, path) { + return nil // duplicate + } + + return ioutil.CommitJSON(path, entry) + } + + for _, entry := range entries { + if err := addEntry(entry); err != nil { + return err + } + } + return ioutil.CommitJSON(ix.cfg.IndexFile, ix.mem) +} + +func (ix *Index) TriggerAlerts() ([]CertificateInfo, error) { + alerts := ix.mem.triggerAlerts(ix.cfg.AlertDelay) + if len(alerts) == 0 { + return []CertificateInfo{}, nil + } + return alerts, ioutil.CommitJSON(ix.cfg.IndexFile, ix.mem) +} + +func (index *Index) Validate() error { + return nil // FIXME: check that the index is populated with valid values +} diff --git a/pkg/storage/index/inmem.go b/pkg/storage/index/inmem.go new file mode 100644 index 0000000..0a084bf --- /dev/null +++ b/pkg/storage/index/inmem.go @@ -0,0 +1,113 @@ +package index + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/google/certificate-transparency-go/x509" + "rgdd.se/silent-ct/pkg/crtutil" +) + +type CertificateID string + +func (crtID *CertificateID) Set(crt x509.Certificate) { + *crtID = CertificateID(crtutil.UniqueID(crt)) +} + +type CertificateInfo struct { + ObservedAt time.Time `json:"observed_at"` + StoredAt string `json:"stored_at"` +} + +// index is an in-memory index of certificates +type index struct { + Alerting map[CertificateID][]CertificateInfo `json:"alerting"` // Certificates that were not marked as "good" on time + Legitimate map[CertificateID][]CertificateInfo `json:"legitimate"` // Certificates that are 
considered "good" + Pending map[CertificateID][]CertificateInfo `json:"pending"` // Certificates that have yet to be marked as "good" +} + +func newIndex() index { + return index{ + Alerting: make(map[CertificateID][]CertificateInfo), + Legitimate: make(map[CertificateID][]CertificateInfo), + Pending: make(map[CertificateID][]CertificateInfo), + } +} + +func (ix *index) JSONUnmarshal(b []byte) error { + type internal index + if err := json.Unmarshal(b, (*internal)(ix)); err != nil { + return err + } + for i, m := range []map[CertificateID][]CertificateInfo{ix.Alerting, ix.Legitimate, ix.Pending} { + if m == nil { + return fmt.Errorf("dictionary named %q is not in the index", []string{"alerting", "legitimate", "pending"}[i]) + } + } + return nil +} + +func (ix *index) triggerAlerts(delay time.Duration) []CertificateInfo { + var alerts []CertificateInfo + + for key, certInfos := range ix.Pending { + certInfo := certInfos[0] + if time.Since(certInfo.ObservedAt) < delay { + continue + } + + alerts = append(alerts, certInfo) + ix.Alerting[key] = certInfos + delete(ix.Pending, key) + } + + return alerts +} + +func (ix *index) addChain(crtID CertificateID, path string) bool { + if _, ok := ix.Legitimate[crtID]; ok { + return false // we already marked this certificate as "good" + } + + entry := CertificateInfo{ObservedAt: time.Now(), StoredAt: path} + crtInfos := []CertificateInfo{entry} + if v, ok := ix.Alerting[crtID]; ok { + crtInfos = append(crtInfos, v...) + delete(ix.Alerting, crtID) // no longer alerting + } else if v, ok := ix.Pending[crtID]; ok { + crtInfos = append(crtInfos, v...) + delete(ix.Pending, crtID) // no longer pending + } + + ix.Legitimate[crtID] = crtInfos + return true // index updated such that this certificate is marked as "good" +} + +func (ix *index) addEntry(crtID CertificateID, path string) bool { + crtInfo := CertificateInfo{ObservedAt: time.Now(), StoredAt: path} + if _, ok := ix.Legitimate[crtID]; ok { + return add(ix.Legitimate, crtID, crtInfo) + } else if _, ok := ix.Alerting[crtID]; ok { + return add(ix.Alerting, crtID, crtInfo) + } + return add(ix.Pending, crtID, crtInfo) +} + +func add(m map[CertificateID][]CertificateInfo, key CertificateID, value CertificateInfo) bool { + crtInfos, ok := m[key] + if !ok { + m[key] = []CertificateInfo{value} + return true + } + + for _, crtInfo := range crtInfos { + if value.StoredAt == crtInfo.StoredAt { + return false // duplicate + } + } + + crtInfos = append(crtInfos, value) + m[key] = crtInfos + return true +} diff --git a/pkg/storage/loglist/loglist.go b/pkg/storage/loglist/loglist.go new file mode 100644 index 0000000..ccc63b0 --- /dev/null +++ b/pkg/storage/loglist/loglist.go @@ -0,0 +1,145 @@ +// Package loglist manages a list of logs to monitor. The list of logs is based +// on Google's signed list. Logs can also be added and removed manually. +package loglist + +import ( + "context" + "fmt" + "time" + + "gitlab.torproject.org/rgdd/ct/pkg/metadata" + "rgdd.se/silent-ct/internal/ioutil" +) + +type Config struct { + PermitBootstrap bool // Get and create initial log metadata if nothing valid is available on disk + MetadataFile string // Path to a dynamically updated metadata file that can be read/written + HistoryDirectory string // Existing directory to store the history of downloaded metadata + + // Optional + MetadataInFuture time.Duration // How wrong the metadata timestamp is allowed to be wrt. 
future dating + MetadataGetsStale time.Duration // How long until the metadata is considered stale + MetadataIsRecent time.Duration // How long the metadata is considered recent + HTTPTimeout time.Duration // Timeout when fetching metadata + StaticLogs []metadata.Log // Takes precedence over the dynamically downloaded metadata + RemoveLogs []metadata.LogKey // Logs in the downloaded metadata with these keys are ignored +} + +type LogList struct { + cfg Config + md metadata.Metadata + source metadata.Loader +} + +func New(cfg Config) (LogList, error) { + if cfg.MetadataInFuture == 0 { + cfg.MetadataInFuture = 24 * time.Hour + } + if cfg.MetadataGetsStale == 0 { + cfg.MetadataGetsStale = 30 * 24 * time.Hour + } + if cfg.MetadataIsRecent == 0 { + cfg.MetadataIsRecent = 1 * time.Hour + } + if cfg.HTTPTimeout == 0 { + cfg.HTTPTimeout = 10 * time.Second + } + + for i, log := range cfg.StaticLogs { + if err := checkLog(log); err != nil { + return LogList{}, fmt.Errorf("static logs: index %d: %v", i, err) + } + } + for i, key := range cfg.RemoveLogs { + if _, err := key.ID(); err != nil { + return LogList{}, fmt.Errorf("remove logs: index %d: %v", i, err) + } + } + + s := metadata.NewHTTPSource(metadata.HTTPSourceOptions{Name: "google"}) + ll := LogList{cfg: cfg, source: &s} + if err := ioutil.DirectoriesExist([]string{cfg.HistoryDirectory}); err != nil { + return LogList{}, err + } + if err := ioutil.ReadJSON(cfg.MetadataFile, &ll.md); err != nil { + if !ll.cfg.PermitBootstrap { + return LogList{}, err + } + if _, _, err := ll.Update(context.Background()); err != nil { + return LogList{}, err + } + } + return ll, nil +} + +func (ll *LogList) IsRecent() bool { + return time.Now().Before(ll.md.CreatedAt.Add(ll.cfg.MetadataIsRecent)) +} + +func (ll *LogList) IsStale() bool { + return time.Now().After(ll.md.CreatedAt.Add(ll.cfg.MetadataGetsStale)) +} + +func (ll *LogList) Generate() []metadata.Log { + var configure []metadata.Log + + for _, log := range ll.cfg.StaticLogs { + configure = append(configure, log) + } + for _, operator := range ll.md.Operators { + for _, log := range operator.Logs { + if findKey(ll.cfg.RemoveLogs, log) { + continue // static configuration says to remove this log + } + if findLog(configure, log) { + continue // static configuration takes precedence + } + if skipLog(log) { + continue // not in a state where it makes sense to monitor + } + configure = append(configure, log) + } + } + + return configure +} + +func (ll *LogList) Update(ctx context.Context) (added []metadata.Log, removed []metadata.Log, err error) { + b, md, err := ll.archiveDynamic(ctx) + if err != nil { + return + } + if err = ioutil.CommitData(ll.cfg.MetadataFile, b); err != nil { + return + } + + added, removed = metadataLogDiff(ll.md, md) + ll.md = md + return +} + +func (ll *LogList) archiveDynamic(ctx context.Context) ([]byte, metadata.Metadata, error) { + llctx, cancel := context.WithTimeout(ctx, ll.cfg.HTTPTimeout) + defer cancel() + + msg, sig, md, err := ll.source.Load(llctx) + if err != nil { + return nil, metadata.Metadata{}, err + } + if future := time.Now().Add(ll.cfg.MetadataInFuture); md.CreatedAt.After(future) { + return nil, metadata.Metadata{}, fmt.Errorf("list created at %v is in the future", md.CreatedAt) + } + if md.CreatedAt.Before(ll.md.CreatedAt) { + return nil, metadata.Metadata{}, fmt.Errorf("list created at %v is older than the current list", md.CreatedAt) + } + + // FIXME: consider only archiving on major version bumps + path := fmt.Sprintf("%s/%s.json", ll.cfg.HistoryDirectory, 
time.Now().Format("2006-01-02_1504")) + if err := ioutil.CommitData(path, msg); err != nil { + return nil, metadata.Metadata{}, err + } + if err := ioutil.CommitData(path+".sig", sig); err != nil { + return nil, metadata.Metadata{}, err + } + return msg, md, nil +} diff --git a/pkg/storage/loglist/metadata.go b/pkg/storage/loglist/metadata.go new file mode 100644 index 0000000..adacf81 --- /dev/null +++ b/pkg/storage/loglist/metadata.go @@ -0,0 +1,62 @@ +package loglist + +import "gitlab.torproject.org/rgdd/ct/pkg/metadata" + +// FIXME: helpers that should probably be in the upstream package + +func metadataFindLog(md metadata.Metadata, target metadata.Log) bool { + for _, operator := range md.Operators { + if findLog(operator.Logs, target) { + return true + } + } + return false +} + +func findLog(logs []metadata.Log, target metadata.Log) bool { + targetID, _ := target.Key.ID() + for _, log := range logs { + id, _ := log.Key.ID() + if id == targetID { + return true + } + } + return false +} + +func findKey(keys []metadata.LogKey, target metadata.Log) bool { + targetID, _ := target.Key.ID() + for _, key := range keys { + id, _ := key.ID() + if id == targetID { + return true + } + } + return false +} + +func metadataLogDiff(initial, other metadata.Metadata) (added []metadata.Log, removed []metadata.Log) { + return metadataNewLogsIn(initial, other), metadataNewLogsIn(other, initial) +} + +func metadataNewLogsIn(initial, other metadata.Metadata) (added []metadata.Log) { + for _, operator := range other.Operators { + for _, log := range operator.Logs { + if !metadataFindLog(initial, log) { + added = append(added, log) + } + } + } + return +} + +func checkLog(log metadata.Log) error { + return nil // FIXME: check valid key, url, mmd, state +} + +func skipLog(log metadata.Log) bool { + return log.State == nil || // logs without a state are considered misconfigured + log.State.Name == metadata.LogStatePending || // log is not yet relevant + log.State.Name == metadata.LogStateRetired || // log is not expected to be reachable + log.State.Name == metadata.LogStateRejected // log is not expected to be reachable +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go new file mode 100644 index 0000000..5e28aca --- /dev/null +++ b/pkg/storage/storage.go @@ -0,0 +1,155 @@ +// Package storage manages an index of certificates, a dynamically updated log +// list, and a monitor's state on the local file system in a single directory. 
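+//
+// Within the configured directory, the certificate index lives in
+// crt_index.json, legitimate certificates in crt_trusted/, matching
+// certificates found in logs in crt_found/, the current log list in
+// metadata.json, archived log lists in metadata_history/, and per-log monitor
+// state in monitor_state/ (one JSON file per log ID).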
+package storage + +import ( + "context" + "crypto/sha256" + "crypto/x509" + "fmt" + "net/http" + "path/filepath" + "time" + + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" + "gitlab.torproject.org/rgdd/ct/pkg/metadata" + "rgdd.se/silent-ct/internal/ioutil" + "rgdd.se/silent-ct/internal/monitor" + "rgdd.se/silent-ct/pkg/storage/index" + "rgdd.se/silent-ct/pkg/storage/loglist" +) + +type Config struct { + Bootstrap bool // Whether a new storage should be bootstrapped in a non-existing directory + Directory string // Path to a directory where everything will be stored + + // Optional + AlertDelay time.Duration // Time before alerting on certificates that are unaccounted for + StaticLogs []metadata.Log // Static logs to configure in loglist + RemoveLogs []metadata.LogKey // Keys of logs to omit in loglist + HTTPTimeout time.Duration // HTTP timeout used when bootstrapping logs +} + +func (cfg *Config) CertificateIndexFile() string { return cfg.Directory + "/crt_index.json" } +func (cfg *Config) LegitimateCertificateDirectory() string { return cfg.Directory + "/crt_trusted" } +func (cfg *Config) DiscoveredCertificateDirectory() string { return cfg.Directory + "/crt_found" } +func (cfg *Config) MetadataFile() string { return cfg.Directory + "/metadata.json" } +func (cfg *Config) MetadataHistoryDirectory() string { return cfg.Directory + "/metadata_history" } +func (cfg *Config) MonitorStateDirectory() string { return cfg.Directory + "/monitor_state" } +func (cfg *Config) MonitorStateFile(logID [sha256.Size]byte) string { + return fmt.Sprintf("%s/%x.json", cfg.MonitorStateDirectory(), logID[:]) +} + +func (cfg *Config) directories() []string { + return []string{ + cfg.Directory, + cfg.LegitimateCertificateDirectory(), + cfg.DiscoveredCertificateDirectory(), + cfg.MetadataHistoryDirectory(), + cfg.MonitorStateDirectory(), + } +} + +func (cfg *Config) configure() error { + if cfg.Directory == "" { + return fmt.Errorf("directory is required") + } + if cfg.HTTPTimeout == 0 { + cfg.HTTPTimeout = 10 * time.Second + } + + path, err := filepath.Abs(cfg.Directory) + if err != nil { + return err + } + cfg.Directory = path + if err := ioutil.DirectoriesExist(cfg.directories()); err != nil { + if !cfg.Bootstrap { + return err + } + return ioutil.CreateDirectories(cfg.directories()) + } + return nil +} + +type Storage struct { + Config + index.Index + loglist.LogList +} + +func New(cfg Config) (Storage, error) { + err := cfg.configure() + if err != nil { + return Storage{}, err + } + + s := Storage{Config: cfg} + if s.Index, err = index.New(index.Config{ + PermitBootstrap: cfg.Bootstrap, + IndexFile: cfg.CertificateIndexFile(), + TrustDirectory: cfg.LegitimateCertificateDirectory(), + MatchDirectory: cfg.DiscoveredCertificateDirectory(), + AlertDelay: cfg.AlertDelay, + }); err != nil { + return Storage{}, err + } + + if s.LogList, err = loglist.New(loglist.Config{ + PermitBootstrap: cfg.Bootstrap, + MetadataFile: cfg.MetadataFile(), + HistoryDirectory: cfg.MetadataHistoryDirectory(), + StaticLogs: cfg.StaticLogs, + RemoveLogs: cfg.RemoveLogs, + }); err != nil { + return Storage{}, err + } + + return s, err +} + +func (s *Storage) BootstrapLog(ctx context.Context, log metadata.Log, skipBacklog bool) (monitor.State, error) { + storedState, err := s.GetMonitorState(log) + if err == nil { + return storedState, ErrorMonitorStateExists + } + + key, err := x509.MarshalPKIXPublicKey(log.Key.Public) + if err != nil { + return monitor.State{}, err + 
} + cli, err := client.New(string(log.URL), &http.Client{}, jsonclient.Options{PublicKeyDER: key}) + if err != nil { + return monitor.State{}, err + } + + sctx, cancel := context.WithTimeout(ctx, s.Config.HTTPTimeout) + defer cancel() + sth, err := cli.GetSTH(sctx) + if err != nil { + return monitor.State{}, err + } + id, _ := log.Key.ID() + sth.LogID = id + + state := monitor.State{SignedTreeHead: *sth} + if skipBacklog { + state.NextIndex = sth.TreeSize + } + return state, s.SetMonitorState(id, state) +} + +func (s *Storage) SetMonitorState(logID [sha256.Size]byte, state monitor.State) error { + return ioutil.CommitJSON(s.MonitorStateFile(logID), state) +} + +func (s *Storage) GetMonitorState(log metadata.Log) (monitor.State, error) { + id, err := log.Key.ID() + if err != nil { + return monitor.State{}, err + } + + state := monitor.State{} + return state, ioutil.ReadJSON(s.MonitorStateFile(id), &state) +} diff --git a/pkg/submission/submission.go b/pkg/submission/submission.go new file mode 100644 index 0000000..d33a49d --- /dev/null +++ b/pkg/submission/submission.go @@ -0,0 +1,104 @@ +// Package submission provides creation and opening of a node's legitimately +// issued certificate chains. A submission is composed of a name, a message +// authentication code, and one or more certificate chains in PEM format. +// +// Note: this package makes no attempt to parse any certificate chain. In other +// words, each certificate chain is treated as an opaque list of bytes. +package submission + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + + "rgdd.se/silent-ct/pkg/policy" +) + +const ( + Separator = "silent-ct:separator\n" +) + +type Submission []byte + +func New(node policy.Node, data [][]byte) (Submission, error) { + mac, err := node.HMAC(message(data)) + if err != nil { + return nil, fmt.Errorf("hmac: %v", err) + } + + buf := bytes.NewBuffer(nil) + buf.WriteString(fmt.Sprintf("%s %x\n", node.Name, mac[:])) + buf.Write(message(data)) + return buf.Bytes(), nil +} + +func (s *Submission) Peek() (name string, err error) { + name, _, _, err = s.split() + return +} + +func (s *Submission) Open(node policy.Node) ([][]byte, error) { + name, gotMAC, msg, err := s.split() + if err != nil { + return nil, err + } + if name != node.Name { + return nil, fmt.Errorf("wrong node name %s", name) + } + wantMAC, err := node.HMAC(msg) + if err != nil { + return nil, fmt.Errorf("hmac: %v", err) + } + if !hmac.Equal(gotMAC[:], wantMAC[:]) { + return nil, fmt.Errorf("hmac: mismatch") + } + + return bytes.Split(msg, []byte(Separator)), nil +} + +func (s *Submission) split() (name string, mac [sha256.Size]byte, msg []byte, err error) { + i := bytes.IndexByte(*s, '\n') + if i == -1 { + return name, mac, msg, fmt.Errorf("no authorization line") + } + authLine := (*s)[:i] + msg = (*s)[i+1:] + + split := strings.Split(string(authLine), " ") + if len(split) != 2 { + return name, mac, msg, fmt.Errorf("invalid authorization line") + } + b, err := hex.DecodeString(split[1]) + if err != nil { + return name, mac, msg, fmt.Errorf("mac: %v", err) + } + if len(b) != len(mac) { + return name, mac, msg, fmt.Errorf("mac: length must be %d bytes", len(mac)) + } + + name = split[0] + copy(mac[:], b) + return +} + +func message(data [][]byte) []byte { + if len(data) == 0 { + return []byte{} + } + + buf := bytes.NewBuffer(data[0]) + for i, d := range data[1:] { + prev := data[i] + if prev[len(prev)-1] != '\n' { + buf.Write([]byte("\n")) + } + + buf.Write([]byte(Separator)) + buf.Write(d) + 
}
+
+	return buf.Bytes()
+}
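
One possible way to wire up the new pkg/storage package, shown here only as a
minimal sketch: the directory path, AlertDelay, and the logs slice are
placeholders, and how logs are selected (e.g., via the embedded loglist
component) is left out. With Bootstrap set, New creates the directory layout
under Directory (crt_trusted/, crt_found/, metadata_history/, monitor_state/)
and points the index and loglist components at crt_index.json and
metadata.json; BootstrapLog then fetches an initial STH and stores it as
monitor_state/<logID>.json.

// Sketch only: paths and the logs slice are placeholders.
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"gitlab.torproject.org/rgdd/ct/pkg/metadata"
	"rgdd.se/silent-ct/pkg/storage"
)

func main() {
	s, err := storage.New(storage.Config{
		Bootstrap:  true,                 // create the directory layout on first run
		Directory:  "/var/lib/silent-ct", // example path; crt_trusted/, crt_found/, etc. end up here
		AlertDelay: 24 * time.Hour,
	})
	if err != nil {
		log.Fatal(err)
	}

	var logs []metadata.Log // assumed: selected elsewhere, e.g. via the embedded loglist component
	for _, l := range logs {
		// Fetch an initial STH and persist it as monitor_state/<logID>.json,
		// starting at the current tree size (skipBacklog = true).
		_, err := s.BootstrapLog(context.Background(), l, true)
		if err != nil && !errors.Is(err, storage.ErrorMonitorStateExists) {
			log.Fatal(err)
		}
	}
}

Note that BootstrapLog returns ErrorMonitorStateExists together with the
already stored state when a log was bootstrapped earlier, so callers can treat
that error as a soft signal rather than a failure, as the loop above does.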
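
The pkg/submission wire format is one authorization line, "<node name>
<hex-encoded 32-byte MAC>", followed by the PEM chains joined by the
"silent-ct:separator" line. Below is a minimal round-trip sketch through New,
Peek, and Open; the policy.Node value is assumed to be configured elsewhere
(its fields live in pkg/policy/node.go) and the chain bytes are placeholders.

// Sketch only: node and chain contents are placeholders.
package main

import (
	"fmt"
	"log"

	"rgdd.se/silent-ct/pkg/policy"
	"rgdd.se/silent-ct/pkg/submission"
)

func main() {
	// Assumed to be loaded from the monitor's policy; a zero Node carries no
	// name or HMAC secret, so New/Open would fail at runtime with it.
	var node policy.Node

	chains := [][]byte{
		[]byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"),
	}

	// Node side: authorization line ("<name> <hex mac>") followed by the
	// chains joined by the separator line.
	sub, err := submission.New(node, chains)
	if err != nil {
		log.Fatal(err)
	}

	// Monitor side: peek at the claimed name to pick the right policy node,
	// then verify the MAC and recover the chains.
	name, err := sub.Peek()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("claimed sender:", name)

	recovered, err := sub.Open(node)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("chains recovered:", len(recovered))
}

Peek only reads the claimed name, which lets the monitor pick the right node
key before committing to the MAC check in Open; the recovered chains come back
as the same opaque byte strings that went in.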