Diffstat (limited to 'pkg')
-rw-r--r--  pkg/crtutil/crt_util.go          |  71
-rw-r--r--  pkg/monitor/chunks.go            |  88
-rw-r--r--  pkg/monitor/errors.go            |  41
-rw-r--r--  pkg/monitor/matcher.go           |  90
-rw-r--r--  pkg/monitor/messages.go          |  40
-rw-r--r--  pkg/monitor/monitor.go           | 286
-rw-r--r--  pkg/policy/node.go               |  93
-rw-r--r--  pkg/policy/policy.go             |  18
-rw-r--r--  pkg/policy/wildcard.go           |  83
-rw-r--r--  pkg/server/handler.go            |  32
-rw-r--r--  pkg/server/messages.go           |  23
-rw-r--r--  pkg/server/nodes.go              |  53
-rw-r--r--  pkg/server/server.go             | 133
-rw-r--r--  pkg/storage/errors.go            |   5
-rw-r--r--  pkg/storage/index/index.go       | 103
-rw-r--r--  pkg/storage/index/inmem.go       | 113
-rw-r--r--  pkg/storage/loglist/loglist.go   | 145
-rw-r--r--  pkg/storage/loglist/metadata.go  |  62
-rw-r--r--  pkg/storage/storage.go           | 155
-rw-r--r--  pkg/submission/submission.go     | 104
20 files changed, 952 insertions, 786 deletions
diff --git a/pkg/crtutil/crt_util.go b/pkg/crtutil/crt_util.go
new file mode 100644
index 0000000..11bcd7e
--- /dev/null
+++ b/pkg/crtutil/crt_util.go
@@ -0,0 +1,71 @@
+// Package crtutil provides utility functions for working with certificates.
+package crtutil
+
+import (
+	"crypto/sha256"
+	"encoding/pem"
+	"fmt"
+
+	ct "github.com/google/certificate-transparency-go"
+	"github.com/google/certificate-transparency-go/x509"
+)
+
+// CertificateChainFromPEM parses a certificate chain in PEM format. At least
+// one certificate must be in the chain. The first certificate must be a leaf,
+// whereas all other certificates must be CA certificates (intermediates/roots).
+func CertificateChainFromPEM(b []byte) ([]x509.Certificate, error) {
+	var chain []x509.Certificate
+
+	for {
+		block, rest := pem.Decode(b)
+		if block == nil {
+			break
+		}
+		crt, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, fmt.Errorf("parse certificate: %v", err)
+		}
+
+		chain = append(chain, *crt)
+		b = rest
+	}
+
+	if len(chain) == 0 {
+		return nil, fmt.Errorf("no certificates in the provided chain")
+	}
+	if chain[0].IsCA {
+		return nil, fmt.Errorf("leaf certificate has the CA bit set")
+	}
+	for _, crt := range chain[1:] {
+		if !crt.IsCA {
+			return nil, fmt.Errorf("non-leaf certificate without the CA bit set")
+		}
+	}
+
+	return chain, nil
+}
+
+// CertificateFromLogEntry parses the (pre-)certificate in a log entry.
+func CertificateFromLogEntry(leafData, extraData []byte) (x509.Certificate, error) {
+	entry, err := ct.LogEntryFromLeaf(0, &ct.LeafEntry{LeafInput: leafData, ExtraData: extraData})
+	if err != nil {
+		return x509.Certificate{}, fmt.Errorf("parse leaf: %v", err)
+	}
+	if entry.Precert == nil && entry.X509Cert == nil {
+		return x509.Certificate{}, fmt.Errorf("neither precertificate nor certificate in leaf")
+	}
+	if entry.Precert != nil && entry.X509Cert != nil {
+		return x509.Certificate{}, fmt.Errorf("both certificate and precertificate in leaf")
+	}
+	if entry.Precert != nil {
+		return *entry.Precert.TBSCertificate, nil
+	}
+	return *entry.X509Cert, nil
+}
+
+// UniqueID derives a unique certificate ID. The same value is derived
+// regardless of whether the (pre-)certificate is logged multiple times.
+func UniqueID(crt x509.Certificate) string {
+	h := sha256.Sum256([]byte(crt.SerialNumber.String()))
+	return fmt.Sprintf("FIXME:%x", h[:]) // not a secure mapping
+}
diff --git a/pkg/monitor/chunks.go b/pkg/monitor/chunks.go
deleted file mode 100644
index 87871b9..0000000
--- a/pkg/monitor/chunks.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package monitor
-
-//
-// A min heap of chunks, oredered on each chunk's start index.
-//
-// Credit: inspiration to use a heap from Aaron Gable, see
-// https://github.com/aarongable/ctaudit
-//
-
-import (
-	"container/heap"
-	"crypto/sha256"
-)
-
-type chunk struct {
-	startIndex uint64              // Index of the first leaf
-	leafHashes [][sha256.Size]byte // List of consecutive leaf hashes
-	matches    []LogEntry          // Leaves that matches some criteria
-	errors     []error             // Errors that ocurred while parsing leaves
-}
-
-type chunks []*chunk
-
-func newChunks() *chunks {
-	var h chunks
-	heap.Init((*internal)(&h))
-	return &h
-}
-
-func (h *chunks) push(c *chunk) {
-	heap.Push((*internal)(h), c)
-}
-
-func (h *chunks) pop() *chunk {
-	x := heap.Pop((*internal)(h))
-	return x.(*chunk)
-}
-
-// gap returns true if there's a gap between the provided start index and the
-// top most chunk.
If the top most chunk is in sequence, it is merged with -// any following chunks that are also in sequence to form one larger chunk. -func (h *chunks) gap(start uint64) bool { - if len(*h) == 0 { - return true - } - - top := h.pop() - if start != top.startIndex { - h.push(top) - return true - } - - for len(*h) > 0 { - c := h.pop() - if c.startIndex != top.startIndex+uint64(len(top.leafHashes)) { - h.push(c) - break - } - - top.leafHashes = append(top.leafHashes, c.leafHashes...) - top.matches = append(top.matches, c.matches...) - top.errors = append(top.errors, c.errors...) - } - - h.push(top) - return false -} - -// internal implements the heap interface, see example: -// https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/container/heap/example_intheap_test.go -type internal chunks - -func (h internal) Len() int { return len(h) } -func (h internal) Less(i, j int) bool { return h[i].startIndex < h[j].startIndex } -func (h internal) Swap(i, j int) { h[i], h[j] = h[j], h[i] } - -func (h *internal) Push(x any) { - *h = append(*h, x.(*chunk)) -} - -func (h *internal) Pop() any { - old := *h - n := len(old) - x := old[n-1] - old[n-1] = nil // avoid memory leak - *h = old[:n-1] - return x -} diff --git a/pkg/monitor/errors.go b/pkg/monitor/errors.go deleted file mode 100644 index 4d676af..0000000 --- a/pkg/monitor/errors.go +++ /dev/null @@ -1,41 +0,0 @@ -package monitor - -import ( - "fmt" - - ct "github.com/google/certificate-transparency-go" -) - -// ErrorFetch occurs if there's a problem hitting the log's HTTP API. An STH is -// provided if available, since it might carry evidence of some log misbehavior. -type ErrorFetch struct { - URL string - Msg string - Err error - STH *ct.SignedTreeHead -} - -func (e ErrorFetch) Error() string { - return fmt.Sprintf("%s: %s: %v", e.URL, e.Msg, e.Err) -} - -// ErrorMerkleTree occurs if the log's Merkle tree can't be verified. An STH is -// provided if available (i.e., won't be available for internal tree building). 
-type ErrorMerkleTree struct { - URL string - Msg string - Err error - STH *ct.SignedTreeHead -} - -func (e ErrorMerkleTree) Error() string { - return fmt.Sprintf("%s: %s: %v", e.URL, e.Msg, e.Err) -} - -// TODO: MMD violations -// TODO: Growing read-only logs - -// noout implements the Logger interface to discard unwanted output -type noout struct{} - -func (n *noout) Printf(string, ...interface{}) {} diff --git a/pkg/monitor/matcher.go b/pkg/monitor/matcher.go deleted file mode 100644 index fa3a894..0000000 --- a/pkg/monitor/matcher.go +++ /dev/null @@ -1,90 +0,0 @@ -package monitor - -import ( - "fmt" - "strings" - - ct "github.com/google/certificate-transparency-go" -) - -type Matcher interface { - Match(leafInput, extraData []byte) (bool, error) -} - -// MatchAll matches all certificates -type MatchAll struct{} - -func (m *MatchAll) Match(leafInput, extraData []byte) (bool, error) { - return true, nil -} - -// MatchWildcards matches a list of wildcards, see the MatchWildcard type -type MatchWildcards []MatchWildcard - -func (m *MatchWildcards) Match(leafInput, extraData []byte) (bool, error) { - sans, err := getSANs(ct.LeafEntry{LeafInput: leafInput, ExtraData: extraData}) - if err != nil { - return false, err - } - return m.match(sans), nil -} - -func (m *MatchWildcards) match(sans []string) bool { - for _, mw := range (*m)[:] { - if mw.match(sans) { - return true - } - } - return false -} - -// MatchWildcard exclude matches for `.*<Exclude>\.<Wildcard>`, but will -// otherwise match on any `.*\.<Wildcard>` as well as SANs equal to <Wildcard>. -// -// For example, let <Wildcard> be example.org and Exclude be [foo, bar]. Then -// example.org and www.example.org would match, whereas foo.example.org, -// sub.foo.example.org, and bar.example.org. would not match. -type MatchWildcard struct { - Wildcard string `json:"wildcard"` - Excludes []string `json:"excludes"` -} - -func (m *MatchWildcard) match(sans []string) bool { - for _, san := range sans { - if san == m.Wildcard { - return true - } - if strings.HasSuffix(san, "."+m.Wildcard) && !m.exclude(san) { - return true - } - } - return false -} - -func (m *MatchWildcard) exclude(san string) bool { - for _, exclude := range m.Excludes { - suffix := exclude + "." + m.Wildcard - if strings.HasSuffix(san, suffix) { - return true - } - } - return false -} - -func getSANs(entry ct.LeafEntry) ([]string, error) { - // Warning: here be dragons, parsing of DNS names in certificates... - e, err := ct.LogEntryFromLeaf(0, &entry) - if err != nil { - return nil, fmt.Errorf("parse leaf: %v", err) - } - if e.Precert == nil && e.X509Cert == nil { - return nil, fmt.Errorf("neither precertificate nor certificate in leaf") - } - if e.Precert != nil && e.X509Cert != nil { - return nil, fmt.Errorf("both certificate and precertificate in leaf") - } - if e.Precert != nil { - return e.Precert.TBSCertificate.DNSNames, nil - } - return e.X509Cert.DNSNames, nil -} diff --git a/pkg/monitor/messages.go b/pkg/monitor/messages.go deleted file mode 100644 index 717aae6..0000000 --- a/pkg/monitor/messages.go +++ /dev/null @@ -1,40 +0,0 @@ -package monitor - -import ( - ct "github.com/google/certificate-transparency-go" - "gitlab.torproject.org/rgdd/ct/pkg/metadata" -) - -// MessageLogConfig provides information about a log the monitor is downloading -type MessageLogConfig struct { - Metadata metadata.Log - State MonitorState -} - -// MessageLogProgress is the next log state and any encountered leaves that were -// considered matching since the previous log state. 
Parse errors are included. -type MessageLogProgress struct { - State MonitorState - Matches []LogEntry - Errors []error -} - -// MonitorState describes the monitor's state for a particular log. The signed tree -// head is the latest verified append-only state that was observed. The index -// is the next leaf which will be downloaded and processed by the monitor. -type MonitorState struct { - LogState - NextIndex uint64 -} - -// LogState describes the state of a log -type LogState struct { - ct.SignedTreeHead -} - -// LogEntry is a leaf in a log's Merkle tree -type LogEntry struct { - LeafIndex uint64 - LeafData []byte - ExtraData []byte -} diff --git a/pkg/monitor/monitor.go b/pkg/monitor/monitor.go deleted file mode 100644 index 5f7a629..0000000 --- a/pkg/monitor/monitor.go +++ /dev/null @@ -1,286 +0,0 @@ -// Package monitor provides a Certificate Transparency monitor that tails a list -// of logs which can be updated dynamically while running. All emitted progress -// messages have been verified by the monitor to be included in the log's -// append-only Merkle tree with regard to the initial start-up state. It is up -// to the user to process the monitor's progress, errors, and persist state. -// -// Implement the Matcher interface to customize which certificates should be -// included in a log's progress messages, or use any of the existing matchers -// provided by this package (see for example MatchAll and MatchWildcards). -package monitor - -import ( - "context" - "crypto/sha256" - "fmt" - "net/http" - "sync" - "time" - - ct "github.com/google/certificate-transparency-go" - "github.com/google/certificate-transparency-go/client" - "github.com/google/certificate-transparency-go/jsonclient" - "github.com/google/certificate-transparency-go/scanner" - "rgdd.se/silent-ct/internal/merkle" -) - -const ( - UserAgentPrefix = "rgdd.se/silent-ct" - DefaultContact = "unknown-user" - DefaultChunkSize = 256 // TODO: increase me - DefaultBatchSize = 128 // TODO: increase me - DefaultNumWorkers = 2 -) - -type Config struct { - Contact string // Something that help log operators get in touch - ChunkSize int // Min number of leaves to propagate a chunk without matches - BatchSize int // Max number of certificates to accept per worker - NumWorkers int // Number of parallel workers to use for each log - - // Callback determines which certificates are interesting to detect - Callback Matcher -} - -type Monitor struct { - Config -} - -func New(cfg Config) (Monitor, error) { - if cfg.Contact == "" { - cfg.Contact = "unknown-user" - } - if cfg.ChunkSize <= 0 { - cfg.ChunkSize = DefaultChunkSize - } - if cfg.BatchSize <= 0 { - cfg.BatchSize = DefaultBatchSize - } - if cfg.NumWorkers <= 0 { - cfg.NumWorkers = DefaultNumWorkers - } - if cfg.Callback == nil { - cfg.Callback = &MatchAll{} - } - return Monitor{Config: cfg}, nil -} - -func (mon *Monitor) Run(ctx context.Context, metadataCh chan []MessageLogConfig, eventCh chan MessageLogProgress, errorCh chan error) { - var wg sync.WaitGroup - var sctx context.Context - var cancel context.CancelFunc - - for { - select { - case <-ctx.Done(): - return - case metadata := <-metadataCh: - fmt.Printf("DEBUG: received new list with %d logs\n", len(metadata)) - if cancel != nil { - fmt.Printf("DEBUG: stopping all log tailers\n") - cancel() - wg.Wait() - } - - sctx, cancel = context.WithCancel(ctx) - for _, md := range metadata { - fmt.Printf("DEBUG: starting log tailer %s\n", md.Metadata.URL) - wg.Add(1) - go func(lcfg MessageLogConfig) { - defer wg.Done() - - opts := 
jsonclient.Options{Logger: &noout{}, UserAgent: UserAgentPrefix + ":" + mon.Contact} - cli, err := client.New(string(lcfg.Metadata.URL), &http.Client{}, opts) - if err != nil { - errorCh <- fmt.Errorf("unable to configure %s: %v", lcfg.Metadata.URL, err) - return - } - - chunkCh := make(chan *chunk) - defer close(chunkCh) - - t := tail{mon.Config, *cli, chunkCh, eventCh, errorCh} - if err := t.run(sctx, lcfg.State); err != nil { - errorCh <- fmt.Errorf("unable to continue tailing %s: %v", lcfg.Metadata.URL, err) - } - }(md) - } - } - } -} - -type tail struct { - mcfg Config - cli client.LogClient - - chunkCh chan *chunk - eventCh chan MessageLogProgress - errorCh chan error -} - -func (t *tail) run(ctx context.Context, state MonitorState) error { - var wg sync.WaitGroup - defer wg.Wait() - - mctx, cancel := context.WithCancel(ctx) - defer cancel() - - wg.Add(1) - go func() { - defer wg.Done() - defer cancel() - t.sequence(mctx, state) - }() - - fetcher := scanner.NewFetcher(&t.cli, &scanner.FetcherOptions{ - BatchSize: t.mcfg.BatchSize, - StartIndex: int64(state.NextIndex), - ParallelFetch: t.mcfg.NumWorkers, - Continuous: true, - }) - callback := func(eb scanner.EntryBatch) { - c := chunk{startIndex: uint64(eb.Start)} - for i := 0; i < len(eb.Entries); i++ { - c.leafHashes = append(c.leafHashes, merkle.HashLeafNode(eb.Entries[i].LeafInput)) - match, err := t.mcfg.Callback.Match(eb.Entries[i].LeafInput, eb.Entries[i].ExtraData) - if err != nil { - c.errors = append(c.errors, fmt.Errorf("while processing index %d for %s: %v", i, t.cli.BaseURI(), err)) - } else if match { - c.matches = append(c.matches, LogEntry{ - LeafIndex: uint64(i), - LeafData: eb.Entries[i].LeafInput, - ExtraData: eb.Entries[i].ExtraData, - }) - } - } - t.chunkCh <- &c - } - return fetcher.Run(mctx, callback) -} - -func (t *tail) sequence(ctx context.Context, state MonitorState) { - heap := newChunks() - for { - select { - case <-ctx.Done(): - return - case c := <-t.chunkCh: - heap.push(c) - if heap.gap(state.NextIndex) { - continue - } - c = heap.pop() - if len(c.matches) == 0 && len(c.leafHashes) < t.mcfg.ChunkSize { - heap.push(c) - continue // TODO: don't trigger if we havn't run nextState for too long - } - nextState, err := t.nextState(ctx, state, c) - if err != nil { - t.errorCh <- err - heap.push(c) - continue - } - - state = nextState - t.eventCh <- MessageLogProgress{State: state, Matches: c.matches, Errors: c.errors} - } - } -} - -func (t *tail) nextState(ctx context.Context, state MonitorState, c *chunk) (MonitorState, error) { - newState, err := t.nextConsistentState(ctx, state) - if err != nil { - return MonitorState{}, err - } - newState, err = t.nextIncludedState(ctx, state, c) - if err != nil { - return MonitorState{}, err - } - return newState, nil -} - -func (t *tail) nextConsistentState(ctx context.Context, state MonitorState) (MonitorState, error) { - sth, err := getSignedTreeHead(ctx, &t.cli) - if err != nil { - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-sth", Err: err} - } - oldSize := state.TreeSize - oldRoot := state.SHA256RootHash - newSize := sth.TreeSize - newRoot := sth.SHA256RootHash - - proof, err := getConsistencyProof(ctx, &t.cli, oldSize, newSize) - if err != nil { - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-consistency", STH: sth, Err: err} - } - if err := merkle.VerifyConsistency(oldSize, newSize, oldRoot, newRoot, unslice(proof)); err != nil { - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "consistency", STH: sth, 
Err: err} - } - - fmt.Printf("DEBUG: consistently updated STH from size %d to %d\n", oldSize, newSize) - return MonitorState{LogState: LogState{*sth}, NextIndex: state.NextIndex}, nil -} - -func (t *tail) nextIncludedState(ctx context.Context, state MonitorState, c *chunk) (MonitorState, error) { - leafHash := c.leafHashes[0] - oldSize := state.NextIndex + uint64(len(c.leafHashes)) - iproof, err := getInclusionProof(ctx, &t.cli, leafHash, oldSize) - if err != nil { - err = fmt.Errorf("leaf hash %x and tree size %d: %v", leafHash[:], oldSize, err) - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-inclusion", Err: err} - } - if got, want := uint64(iproof.LeafIndex), state.NextIndex; got != want { - err := fmt.Errorf("leaf hash %x and tree size %d: expected leaf index %d but got %d", leafHash[:], oldSize, got, want) - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "proof-index", Err: err} - } - oldRoot, err := merkle.TreeHeadFromRangeProof(c.leafHashes, state.NextIndex, unslice(iproof.AuditPath)) - if err != nil { - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "inclusion", Err: err} - } - - newSize := state.TreeSize - newRoot := state.SHA256RootHash - cproof, err := getConsistencyProof(ctx, &t.cli, oldSize, newSize) - if err != nil { - err = fmt.Errorf("from size %d to %d: %v", oldSize, newSize, err) - return MonitorState{}, ErrorFetch{URL: t.cli.BaseURI(), Msg: "get-consistency", Err: err} - } - if err := merkle.VerifyConsistency(oldSize, newSize, oldRoot, newRoot, unslice(cproof)); err != nil { - err = fmt.Errorf("from size %d to %d: %v", oldSize, newSize, err) - return MonitorState{}, ErrorMerkleTree{URL: t.cli.BaseURI(), Msg: "consistency", Err: err} - } - - state.NextIndex += uint64(len(c.leafHashes)) - return state, nil -} - -func getInclusionProof(ctx context.Context, cli *client.LogClient, leafHash [sha256.Size]byte, size uint64) (*ct.GetProofByHashResponse, error) { - rctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - return cli.GetProofByHash(rctx, leafHash[:], size) -} - -func getConsistencyProof(ctx context.Context, cli *client.LogClient, oldSize, newSize uint64) ([][]byte, error) { - if oldSize == 0 || oldSize >= newSize { - return [][]byte{}, nil - } - rctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - return cli.GetSTHConsistency(rctx, oldSize, newSize) -} - -func getSignedTreeHead(ctx context.Context, cli *client.LogClient) (*ct.SignedTreeHead, error) { - rctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - return cli.GetSTH(rctx) -} - -func unslice(hashes [][]byte) [][sha256.Size]byte { - var ret [][sha256.Size]byte - for _, hash := range hashes { - var h [sha256.Size]byte - copy(h[:], hash) - ret = append(ret, h) - } - return ret -} diff --git a/pkg/policy/node.go b/pkg/policy/node.go new file mode 100644 index 0000000..23f04ca --- /dev/null +++ b/pkg/policy/node.go @@ -0,0 +1,93 @@ +package policy + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + + "golang.org/x/crypto/hkdf" +) + +type Node struct { + Name string `json:"name"` // Artbirary node name to authenticate + Secret string `json:"secret"` // Arbitrary node secret for authentication + URL string `json:"url"` // Where the node's submissions can be downloaded + Domains []string `json:"issues"` // Exact-match domain names allowed to be issued + + key [16]byte +} + +func NewNode(name, secret, url string, domains []string) (Node, error) { + n := Node{Name: name, 
Secret: secret, Domains: domains, URL: url} + if err := n.deriveKey(); err != nil { + return Node{}, err + } + return n, n.Validate() +} + +func (n *Node) UnmarshalJSON(data []byte) error { + type internal Node + if err := json.Unmarshal(data, (*internal)(n)); err != nil { + return err + } + if err := n.deriveKey(); err != nil { + return err + } + return n.Validate() +} + +func (n *Node) Validate() error { + if n.Name == "" { + return fmt.Errorf("name is required") + } + if n.Secret == "" { + return fmt.Errorf("secret is required") + } + if n.URL == "" { + return fmt.Errorf("url is required") + } + if n.key == [16]byte{} { + return fmt.Errorf("key needs to be derived") + } + return nil +} + +func (n *Node) Authorize(sans []string) error { + for _, san := range sans { + ok := false + for _, domain := range n.Domains { + if domain == san { + ok = true + break + } + } + + if !ok { + return fmt.Errorf("node %s is not authorized to issue certificate with name %s", n.Name, san) + } + } + return nil +} + +func (n *Node) HMAC(data []byte) (mac [sha256.Size]byte, err error) { + if err = n.Validate(); err != nil { + return + } + + h := hmac.New(sha256.New, n.key[:]) + _, err = h.Write(data) + + copy(mac[:], h.Sum(nil)) + return +} + +func (n *Node) deriveKey() error { + const salt = "silent-ct" + + hkdf := hkdf.New(sha256.New, []byte(n.Secret), []byte(salt), []byte(n.Name)) + _, err := io.ReadFull(hkdf, n.key[:]) + + return err +} diff --git a/pkg/policy/policy.go b/pkg/policy/policy.go new file mode 100644 index 0000000..8ee4867 --- /dev/null +++ b/pkg/policy/policy.go @@ -0,0 +1,18 @@ +// Package policy specifies which certificates to look for while monitoring, and +// how to pull legitimately issued certificates from trusted nodes based on a +// shared secret. Statically configured logs can also be specified, as well as +// logs that should not be monitored even if they appear in any dynamic list. +package policy + +import ( + "gitlab.torproject.org/rgdd/ct/pkg/metadata" +) + +type Policy struct { + Monitor Wildcards `json:"monitor"` + Nodes []Node `json:"nodes"` + + // Optional + StaticLogs []metadata.Log `json:"static_logs"` + RemoveLogs []metadata.LogKey `json:"remove_logs"` +} diff --git a/pkg/policy/wildcard.go b/pkg/policy/wildcard.go new file mode 100644 index 0000000..58b0d17 --- /dev/null +++ b/pkg/policy/wildcard.go @@ -0,0 +1,83 @@ +package policy + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "rgdd.se/silent-ct/pkg/crtutil" +) + +// Wildcards implement the monitor.Matcher interface for a list of wildcards. +// +// Warning: parsing of SANs in certificates is hard. This matcher depends on +// the parsing defined in github.com/google/certificate-transparency-go/x509. +type Wildcards []Wildcard + +func (w *Wildcards) Match(leafData, extraData []byte) (bool, error) { + crt, err := crtutil.CertificateFromLogEntry(leafData, extraData) + if err != nil { + return false, err + } + return w.match(crt.DNSNames, crt.NotAfter), nil +} + +func (w *Wildcards) match(sans []string, notAfter time.Time) bool { + for _, wildcard := range *w { + if wildcard.Match(sans, notAfter) { + return true + } + } + return false +} + +// Wildcard matches any string that ends with `Wildcard`, unless: +// +// 1. `Excludes[i] + "." + Wildcard` is a longer suffix match, or +// 2. the certificate expired before the BootstrapAt timestamp. 
+type Wildcard struct { + BootstrapAt time.Time `json:"bootstrap_at"` + Wildcard string `json:"wildcard"` + Excludes []string `json:"excludes",omitempty"` +} + +func (w *Wildcard) UnmarshalJSON(data []byte) error { + type internal Wildcard + if err := json.Unmarshal(data, (*internal)(w)); err != nil { + return err + } + return w.Validate() +} + +func (w *Wildcard) Validate() error { + if w.BootstrapAt.IsZero() { + return fmt.Errorf("bootstrap time is required") + } + if len(w.Wildcard) == 0 { + return fmt.Errorf("wildcard is required") + } + return nil +} + +func (w *Wildcard) Match(sans []string, expiresAt time.Time) bool { + for _, san := range sans { + if san == w.Wildcard { + return w.BootstrapAt.Before(expiresAt) + } + if strings.HasSuffix(san, "."+w.Wildcard) && !w.exclude(san) { + return w.BootstrapAt.Before(expiresAt) + } + } + return false +} + +func (w *Wildcard) exclude(san string) bool { + for _, exclude := range w.Excludes { + suffix := exclude + "." + w.Wildcard + if strings.HasSuffix(san, suffix) { + return true + } + } + return false +} diff --git a/pkg/server/handler.go b/pkg/server/handler.go deleted file mode 100644 index 6d17af7..0000000 --- a/pkg/server/handler.go +++ /dev/null @@ -1,32 +0,0 @@ -package server - -import ( - "fmt" - "net/http" -) - -// handler implements the http.Handler interface -type handler struct { - method string - endpoint string - callback func(w http.ResponseWriter, r *http.Request) (int, string) -} - -func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Header().Set("Cache-Control", "no-cache") - if r.Method != h.method { - http.Error(w, "Invalid HTTP method, expected "+h.method, http.StatusMethodNotAllowed) - return - } - code, text := h.callback(w, r) - if code != http.StatusOK { - http.Error(w, text, code) - return - } - fmt.Fprintf(w, fmt.Sprintf("%s\n", text)) -} - -func (h handler) register(mux *http.ServeMux) { - mux.Handle("/"+h.endpoint, h) -} diff --git a/pkg/server/messages.go b/pkg/server/messages.go deleted file mode 100644 index a6ea243..0000000 --- a/pkg/server/messages.go +++ /dev/null @@ -1,23 +0,0 @@ -package server - -import ( - "fmt" - "time" -) - -type MessageNodeSubmission struct { - SerialNumber string - NotBefore time.Time - DomainNames []string - PEMChain []byte -} - -type ErrorUnauthorizedDomainName struct { - PEMChain []byte - Node Node - Err error -} - -func (e ErrorUnauthorizedDomainName) Error() string { - return fmt.Sprintf("%v", e.Err) -} diff --git a/pkg/server/nodes.go b/pkg/server/nodes.go deleted file mode 100644 index 164c06f..0000000 --- a/pkg/server/nodes.go +++ /dev/null @@ -1,53 +0,0 @@ -package server - -import ( - "crypto/x509" - "fmt" - "net/http" -) - -// Node is an identified system that can request certificates -type Node struct { - Name string `json:"name"` // Artbirary node name for authentication - Secret string `json:"secret"` // Arbitrary node secret for authentication - Domains []string `json:"issues"` // Exact-match domain names that are allowed -} - -func (node *Node) authenticate(r *http.Request) error { - user, password, ok := r.BasicAuth() - if !ok { - return fmt.Errorf("no http basic auth credentials") - } - if user != node.Name || password != node.Secret { - return fmt.Errorf("invalid http basic auth credentials") - } - return nil -} - -func (node *Node) check(crt x509.Certificate) error { - for _, san := range crt.DNSNames { - ok := false - for _, domain := range node.Domains { - if domain == san { - ok 
= true - break - } - } - if !ok { - return fmt.Errorf("%s: not authorized to issue certificates for %s", node.Name, san) - } - } - return nil -} - -// Nodes is a list of nodes that can request certificates -type Nodes []Node - -func (nodes *Nodes) authenticate(r *http.Request) (Node, error) { - for _, node := range (*nodes)[:] { - if err := node.authenticate(r); err == nil { - return node, nil - } - } - return Node{}, fmt.Errorf("no valid HTTP basic auth credentials") -} diff --git a/pkg/server/server.go b/pkg/server/server.go deleted file mode 100644 index 06eb258..0000000 --- a/pkg/server/server.go +++ /dev/null @@ -1,133 +0,0 @@ -package server - -import ( - "context" - "fmt" - "io" - "net" - "net/http" - "time" - - "rgdd.se/silent-ct/internal/x509util" -) - -const ( - EndpointAddChain = "add-chain" - EndpointGetStatus = "get-status" - - DefaultNetwork = "tcp" - DefaultAddress = "localhost:2009" - DefaultConfigFile = "/home/rgdd/.config/silent-ct/config.json" // FIXME -) - -type Config struct { - Network string // tcp or unix - Address string // hostname[:port] or path to a unix socket - Nodes Nodes // Which nodes are trusted to issue what certificates -} - -type Server struct { - Config - - eventCh chan MessageNodeSubmission - errorCh chan error -} - -func New(cfg Config) (Server, error) { - if cfg.Network == "" { - cfg.Network = DefaultNetwork - } - if cfg.Address == "" { - cfg.Network = DefaultAddress - } - return Server{Config: cfg}, nil -} - -func (srv *Server) Run(ctx context.Context, submitCh chan MessageNodeSubmission, errorCh chan error) error { - srv.eventCh = submitCh - srv.errorCh = errorCh - mux := http.NewServeMux() - for _, handler := range srv.handlers() { - handler.register(mux) - } - - listener, err := net.Listen(srv.Network, srv.Address) - if err != nil { - return fmt.Errorf("listen: %v", err) - } - defer listener.Close() - - s := http.Server{Handler: mux} - exitCh := make(chan error, 1) - defer close(exitCh) - go func() { - exitCh <- s.Serve(listener) - }() - - select { - case err := <-exitCh: - if err != nil && err != http.ErrServerClosed { - return fmt.Errorf("serve: %v", err) - } - case <-ctx.Done(): - tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := s.Shutdown(tctx); err != nil { - return fmt.Errorf("shutdown: %v", err) - } - } - return nil -} - -func (srv *Server) handlers() []handler { - return []handler{ - handler{ - method: http.MethodGet, - endpoint: EndpointGetStatus, - callback: func(w http.ResponseWriter, r *http.Request) (int, string) { return srv.getStatus(w, r) }, - }, - handler{ - method: http.MethodPost, - endpoint: EndpointAddChain, - callback: func(w http.ResponseWriter, r *http.Request) (int, string) { return srv.addChain(w, r) }, - }, - } -} - -func (srv *Server) getStatus(w http.ResponseWriter, r *http.Request) (int, string) { - return http.StatusOK, "OK" -} - -func (srv *Server) addChain(w http.ResponseWriter, r *http.Request) (int, string) { - node, err := srv.Nodes.authenticate(r) - if err != nil { - return http.StatusForbidden, "Invalid HTTP Basic Auth credentials" - } - - b, err := io.ReadAll(r.Body) - if err != nil { - return http.StatusBadRequest, "Read HTTP POST body failed" - } - defer r.Body.Close() - - chain, err := x509util.ParseChain(b) - if err != nil { - return http.StatusBadRequest, "Malformed HTTP POST body" - } - if err := node.check(chain[0]); err != nil { - srv.errorCh <- ErrorUnauthorizedDomainName{ - PEMChain: b, - Node: node, - Err: err, - } - } else { - srv.eventCh <- 
MessageNodeSubmission{
-			SerialNumber: chain[0].SerialNumber.String(),
-			NotBefore:    chain[0].NotBefore,
-			DomainNames:  chain[0].DNSNames,
-			PEMChain:     b,
-		}
-	}
-
-	return http.StatusOK, "OK"
-}
diff --git a/pkg/storage/errors.go b/pkg/storage/errors.go
new file mode 100644
index 0000000..3d7997f
--- /dev/null
+++ b/pkg/storage/errors.go
@@ -0,0 +1,5 @@
+package storage
+
+import "errors"
+
+var ErrorMonitorStateExists = errors.New("monitor state already exists on disk")
diff --git a/pkg/storage/index/index.go b/pkg/storage/index/index.go
new file mode 100644
index 0000000..ef9ad60
--- /dev/null
+++ b/pkg/storage/index/index.go
@@ -0,0 +1,103 @@
+// Package index provides an index of locally stored certificates. If a method
+// succeeds, the index and the data that it tracks have been persisted to disk.
+// If a method does not succeed, restore from the persisted index on disk.
+package index
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"time"
+
+	"rgdd.se/silent-ct/internal/ioutil"
+	"rgdd.se/silent-ct/internal/monitor"
+	"rgdd.se/silent-ct/pkg/crtutil"
+)
+
+type Config struct {
+	PermitBootstrap bool   // Create a new index if a valid one does not exist on disk yet
+	IndexFile       string // Path to an index file that can be read/written
+	TrustDirectory  string // Absolute path to an existing directory where legitimate certificates are stored
+	MatchDirectory  string // Absolute path to an existing directory where matching certificates are stored
+
+	// Optional
+	AlertDelay time.Duration // Time before alerting on certificates that are unaccounted for
+}
+
+type Index struct {
+	mem index
+	cfg Config
+}
+
+func New(cfg Config) (Index, error) {
+	ix := Index{cfg: cfg}
+	if err := ioutil.DirectoriesExist([]string{cfg.TrustDirectory, cfg.MatchDirectory}); err != nil {
+		return Index{}, err
+	}
+	if err := ioutil.ReadJSON(cfg.IndexFile, &ix.mem); err != nil {
+		if !cfg.PermitBootstrap {
+			return Index{}, err
+		}
+
+		ix.mem = newIndex()
+		if err := ioutil.CommitJSON(cfg.IndexFile, ix.mem); err != nil {
+			return Index{}, err
+		}
+	}
+	return ix, ix.Validate()
+}
+
+func (ix *Index) AddChain(node string, pem []byte) error {
+	chain, err := crtutil.CertificateChainFromPEM(pem)
+	if err != nil {
+		return err
+	}
+
+	var crtID CertificateID
+	crtID.Set(chain[0])
+	path := fmt.Sprintf("%s/%s-%s.pem", ix.cfg.TrustDirectory, node, crtID)
+	if !ix.mem.addChain(crtID, path) {
+		return nil // duplicate
+	}
+
+	if err := ioutil.CommitData(path, pem); err != nil {
+		return err
+	}
+	return ioutil.CommitJSON(ix.cfg.IndexFile, ix.mem)
+}
+
+func (ix *Index) AddEntries(logID [sha256.Size]byte, entries []monitor.LogEntry) error {
+	addEntry := func(entry monitor.LogEntry) error {
+		crt, err := crtutil.CertificateFromLogEntry(entry.LeafData, entry.ExtraData)
+		if err != nil {
+			return err
+		}
+
+		var crtID CertificateID
+		crtID.Set(crt)
+		path := fmt.Sprintf("%s/%x-%d.json", ix.cfg.MatchDirectory, logID[:], entry.LeafIndex)
+		if !ix.mem.addEntry(crtID, path) {
+			return nil // duplicate
+		}
+
+		return ioutil.CommitJSON(path, entry)
+	}
+
+	for _, entry := range entries {
+		if err := addEntry(entry); err != nil {
+			return err
+		}
+	}
+	return ioutil.CommitJSON(ix.cfg.IndexFile, ix.mem)
+}
+
+func (ix *Index) TriggerAlerts() ([]CertificateInfo, error) {
+	alerts := ix.mem.triggerAlerts(ix.cfg.AlertDelay)
+	if len(alerts) == 0 {
+		return []CertificateInfo{}, nil
+	}
+	return alerts, ioutil.CommitJSON(ix.cfg.IndexFile, ix.mem)
+}
+
+func (index *Index) Validate() error {
+	return nil // FIXME: check that the index is populated with
valid values +} diff --git a/pkg/storage/index/inmem.go b/pkg/storage/index/inmem.go new file mode 100644 index 0000000..0a084bf --- /dev/null +++ b/pkg/storage/index/inmem.go @@ -0,0 +1,113 @@ +package index + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/google/certificate-transparency-go/x509" + "rgdd.se/silent-ct/pkg/crtutil" +) + +type CertificateID string + +func (crtID *CertificateID) Set(crt x509.Certificate) { + *crtID = CertificateID(crtutil.UniqueID(crt)) +} + +type CertificateInfo struct { + ObservedAt time.Time `json:"observed_at"` + StoredAt string `json:"stored_at"` +} + +// index is an in-memory index of certificates +type index struct { + Alerting map[CertificateID][]CertificateInfo `json:"alerting"` // Certificates that were not marked as "good" on time + Legitimate map[CertificateID][]CertificateInfo `json:"legitimate"` // Certificates that are considered "good" + Pending map[CertificateID][]CertificateInfo `json:"pending"` // Certificates that have yet to be marked as "good" +} + +func newIndex() index { + return index{ + Alerting: make(map[CertificateID][]CertificateInfo), + Legitimate: make(map[CertificateID][]CertificateInfo), + Pending: make(map[CertificateID][]CertificateInfo), + } +} + +func (ix *index) JSONUnmarshal(b []byte) error { + type internal index + if err := json.Unmarshal(b, (*internal)(ix)); err != nil { + return err + } + for i, m := range []map[CertificateID][]CertificateInfo{ix.Alerting, ix.Legitimate, ix.Pending} { + if m == nil { + return fmt.Errorf("dictionary named %q is not in the index", []string{"alerting", "legitimate", "pending"}[i]) + } + } + return nil +} + +func (ix *index) triggerAlerts(delay time.Duration) []CertificateInfo { + var alerts []CertificateInfo + + for key, certInfos := range ix.Pending { + certInfo := certInfos[0] + if time.Since(certInfo.ObservedAt) < delay { + continue + } + + alerts = append(alerts, certInfo) + ix.Alerting[key] = certInfos + delete(ix.Pending, key) + } + + return alerts +} + +func (ix *index) addChain(crtID CertificateID, path string) bool { + if _, ok := ix.Legitimate[crtID]; ok { + return false // we already marked this certificate as "good" + } + + entry := CertificateInfo{ObservedAt: time.Now(), StoredAt: path} + crtInfos := []CertificateInfo{entry} + if v, ok := ix.Alerting[crtID]; ok { + crtInfos = append(crtInfos, v...) + delete(ix.Alerting, crtID) // no longer alerting + } else if v, ok := ix.Pending[crtID]; ok { + crtInfos = append(crtInfos, v...) 
+ delete(ix.Pending, crtID) // no longer pending + } + + ix.Legitimate[crtID] = crtInfos + return true // index updated such that this certificate is marked as "good" +} + +func (ix *index) addEntry(crtID CertificateID, path string) bool { + crtInfo := CertificateInfo{ObservedAt: time.Now(), StoredAt: path} + if _, ok := ix.Legitimate[crtID]; ok { + return add(ix.Legitimate, crtID, crtInfo) + } else if _, ok := ix.Alerting[crtID]; ok { + return add(ix.Alerting, crtID, crtInfo) + } + return add(ix.Pending, crtID, crtInfo) +} + +func add(m map[CertificateID][]CertificateInfo, key CertificateID, value CertificateInfo) bool { + crtInfos, ok := m[key] + if !ok { + m[key] = []CertificateInfo{value} + return true + } + + for _, crtInfo := range crtInfos { + if value.StoredAt == crtInfo.StoredAt { + return false // duplicate + } + } + + crtInfos = append(crtInfos, value) + m[key] = crtInfos + return true +} diff --git a/pkg/storage/loglist/loglist.go b/pkg/storage/loglist/loglist.go new file mode 100644 index 0000000..ccc63b0 --- /dev/null +++ b/pkg/storage/loglist/loglist.go @@ -0,0 +1,145 @@ +// Package loglist manages a list of logs to monitor. The list of logs is based +// on Google's signed list. Logs can also be added and removed manually. +package loglist + +import ( + "context" + "fmt" + "time" + + "gitlab.torproject.org/rgdd/ct/pkg/metadata" + "rgdd.se/silent-ct/internal/ioutil" +) + +type Config struct { + PermitBootstrap bool // Get and create initial log metadata if nothing valid is available on disk + MetadataFile string // Path to a dynamically updated metadata file that can be read/written + HistoryDirectory string // Existing directory to store the history of downloaded metadata + + // Optional + MetadataInFuture time.Duration // How wrong the metadata timestamp is allowed to be wrt. 
future dating + MetadataGetsStale time.Duration // How long until the metadata is considered stale + MetadataIsRecent time.Duration // How long the metadata is considered recent + HTTPTimeout time.Duration // Timeout when fetching metadata + StaticLogs []metadata.Log // Takes precedence over the dynamically downloaded metadata + RemoveLogs []metadata.LogKey // Logs in the downloaded metadata with these keys are ignored +} + +type LogList struct { + cfg Config + md metadata.Metadata + source metadata.Loader +} + +func New(cfg Config) (LogList, error) { + if cfg.MetadataInFuture == 0 { + cfg.MetadataInFuture = 24 * time.Hour + } + if cfg.MetadataGetsStale == 0 { + cfg.MetadataGetsStale = 30 * 24 * time.Hour + } + if cfg.MetadataIsRecent == 0 { + cfg.MetadataIsRecent = 1 * time.Hour + } + if cfg.HTTPTimeout == 0 { + cfg.HTTPTimeout = 10 * time.Second + } + + for i, log := range cfg.StaticLogs { + if err := checkLog(log); err != nil { + return LogList{}, fmt.Errorf("static logs: index %d: %v", i, err) + } + } + for i, key := range cfg.RemoveLogs { + if _, err := key.ID(); err != nil { + return LogList{}, fmt.Errorf("remove logs: index %d: %v", i, err) + } + } + + s := metadata.NewHTTPSource(metadata.HTTPSourceOptions{Name: "google"}) + ll := LogList{cfg: cfg, source: &s} + if err := ioutil.DirectoriesExist([]string{cfg.HistoryDirectory}); err != nil { + return LogList{}, err + } + if err := ioutil.ReadJSON(cfg.MetadataFile, &ll.md); err != nil { + if !ll.cfg.PermitBootstrap { + return LogList{}, err + } + if _, _, err := ll.Update(context.Background()); err != nil { + return LogList{}, err + } + } + return ll, nil +} + +func (ll *LogList) IsRecent() bool { + return time.Now().Before(ll.md.CreatedAt.Add(ll.cfg.MetadataIsRecent)) +} + +func (ll *LogList) IsStale() bool { + return time.Now().After(ll.md.CreatedAt.Add(ll.cfg.MetadataGetsStale)) +} + +func (ll *LogList) Generate() []metadata.Log { + var configure []metadata.Log + + for _, log := range ll.cfg.StaticLogs { + configure = append(configure, log) + } + for _, operator := range ll.md.Operators { + for _, log := range operator.Logs { + if findKey(ll.cfg.RemoveLogs, log) { + continue // static configuration says to remove this log + } + if findLog(configure, log) { + continue // static configuration takes precedence + } + if skipLog(log) { + continue // not in a state where it makes sense to monitor + } + configure = append(configure, log) + } + } + + return configure +} + +func (ll *LogList) Update(ctx context.Context) (added []metadata.Log, removed []metadata.Log, err error) { + b, md, err := ll.archiveDynamic(ctx) + if err != nil { + return + } + if err = ioutil.CommitData(ll.cfg.MetadataFile, b); err != nil { + return + } + + added, removed = metadataLogDiff(ll.md, md) + ll.md = md + return +} + +func (ll *LogList) archiveDynamic(ctx context.Context) ([]byte, metadata.Metadata, error) { + llctx, cancel := context.WithTimeout(ctx, ll.cfg.HTTPTimeout) + defer cancel() + + msg, sig, md, err := ll.source.Load(llctx) + if err != nil { + return nil, metadata.Metadata{}, err + } + if future := time.Now().Add(ll.cfg.MetadataInFuture); md.CreatedAt.After(future) { + return nil, metadata.Metadata{}, fmt.Errorf("list created at %v is in the future", md.CreatedAt) + } + if md.CreatedAt.Before(ll.md.CreatedAt) { + return nil, metadata.Metadata{}, fmt.Errorf("list created at %v is older than the current list", md.CreatedAt) + } + + // FIXME: consider only archiving on major version bumps + path := fmt.Sprintf("%s/%s.json", ll.cfg.HistoryDirectory, 
time.Now().Format("2006-01-02_1504")) + if err := ioutil.CommitData(path, msg); err != nil { + return nil, metadata.Metadata{}, err + } + if err := ioutil.CommitData(path+".sig", sig); err != nil { + return nil, metadata.Metadata{}, err + } + return msg, md, nil +} diff --git a/pkg/storage/loglist/metadata.go b/pkg/storage/loglist/metadata.go new file mode 100644 index 0000000..adacf81 --- /dev/null +++ b/pkg/storage/loglist/metadata.go @@ -0,0 +1,62 @@ +package loglist + +import "gitlab.torproject.org/rgdd/ct/pkg/metadata" + +// FIXME: helpers that should probably be in the upstream package + +func metadataFindLog(md metadata.Metadata, target metadata.Log) bool { + for _, operator := range md.Operators { + if findLog(operator.Logs, target) { + return true + } + } + return false +} + +func findLog(logs []metadata.Log, target metadata.Log) bool { + targetID, _ := target.Key.ID() + for _, log := range logs { + id, _ := log.Key.ID() + if id == targetID { + return true + } + } + return false +} + +func findKey(keys []metadata.LogKey, target metadata.Log) bool { + targetID, _ := target.Key.ID() + for _, key := range keys { + id, _ := key.ID() + if id == targetID { + return true + } + } + return false +} + +func metadataLogDiff(initial, other metadata.Metadata) (added []metadata.Log, removed []metadata.Log) { + return metadataNewLogsIn(initial, other), metadataNewLogsIn(other, initial) +} + +func metadataNewLogsIn(initial, other metadata.Metadata) (added []metadata.Log) { + for _, operator := range other.Operators { + for _, log := range operator.Logs { + if !metadataFindLog(initial, log) { + added = append(added, log) + } + } + } + return +} + +func checkLog(log metadata.Log) error { + return nil // FIXME: check valid key, url, mmd, state +} + +func skipLog(log metadata.Log) bool { + return log.State == nil || // logs without a state are considered misconfigured + log.State.Name == metadata.LogStatePending || // log is not yet relevant + log.State.Name == metadata.LogStateRetired || // log is not expected to be reachable + log.State.Name == metadata.LogStateRejected // log is not expected to be reachable +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go new file mode 100644 index 0000000..5e28aca --- /dev/null +++ b/pkg/storage/storage.go @@ -0,0 +1,155 @@ +// Package storage manages an index of certificates, a dynamically updated log +// list, and a monitor's state on the local file system in a single directory. 
+package storage + +import ( + "context" + "crypto/sha256" + "crypto/x509" + "fmt" + "net/http" + "path/filepath" + "time" + + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" + "gitlab.torproject.org/rgdd/ct/pkg/metadata" + "rgdd.se/silent-ct/internal/ioutil" + "rgdd.se/silent-ct/internal/monitor" + "rgdd.se/silent-ct/pkg/storage/index" + "rgdd.se/silent-ct/pkg/storage/loglist" +) + +type Config struct { + Bootstrap bool // Whether a new storage should be bootstrapped in a non-existing directory + Directory string // Path to a directory where everything will be stored + + // Optional + AlertDelay time.Duration // Time before alerting on certificates that are unaccounted for + StaticLogs []metadata.Log // Static logs to configure in loglist + RemoveLogs []metadata.LogKey // Keys of logs to omit in loglist + HTTPTimeout time.Duration // HTTP timeout used when bootstrapping logs +} + +func (cfg *Config) CertificateIndexFile() string { return cfg.Directory + "/crt_index.json" } +func (cfg *Config) LegitimateCertificateDirectory() string { return cfg.Directory + "/crt_trusted" } +func (cfg *Config) DiscoveredCertificateDirectory() string { return cfg.Directory + "/crt_found" } +func (cfg *Config) MetadataFile() string { return cfg.Directory + "/metadata.json" } +func (cfg *Config) MetadataHistoryDirectory() string { return cfg.Directory + "/metadata_history" } +func (cfg *Config) MonitorStateDirectory() string { return cfg.Directory + "/monitor_state" } +func (cfg *Config) MonitorStateFile(logID [sha256.Size]byte) string { + return fmt.Sprintf("%s/%x.json", cfg.MonitorStateDirectory(), logID[:]) +} + +func (cfg *Config) directories() []string { + return []string{ + cfg.Directory, + cfg.LegitimateCertificateDirectory(), + cfg.DiscoveredCertificateDirectory(), + cfg.MetadataHistoryDirectory(), + cfg.MonitorStateDirectory(), + } +} + +func (cfg *Config) configure() error { + if cfg.Directory == "" { + return fmt.Errorf("directory is required") + } + if cfg.HTTPTimeout == 0 { + cfg.HTTPTimeout = 10 * time.Second + } + + path, err := filepath.Abs(cfg.Directory) + if err != nil { + return err + } + cfg.Directory = path + if err := ioutil.DirectoriesExist(cfg.directories()); err != nil { + if !cfg.Bootstrap { + return err + } + return ioutil.CreateDirectories(cfg.directories()) + } + return nil +} + +type Storage struct { + Config + index.Index + loglist.LogList +} + +func New(cfg Config) (Storage, error) { + err := cfg.configure() + if err != nil { + return Storage{}, err + } + + s := Storage{Config: cfg} + if s.Index, err = index.New(index.Config{ + PermitBootstrap: cfg.Bootstrap, + IndexFile: cfg.CertificateIndexFile(), + TrustDirectory: cfg.LegitimateCertificateDirectory(), + MatchDirectory: cfg.DiscoveredCertificateDirectory(), + AlertDelay: cfg.AlertDelay, + }); err != nil { + return Storage{}, err + } + + if s.LogList, err = loglist.New(loglist.Config{ + PermitBootstrap: cfg.Bootstrap, + MetadataFile: cfg.MetadataFile(), + HistoryDirectory: cfg.MetadataHistoryDirectory(), + StaticLogs: cfg.StaticLogs, + RemoveLogs: cfg.RemoveLogs, + }); err != nil { + return Storage{}, err + } + + return s, err +} + +func (s *Storage) BootstrapLog(ctx context.Context, log metadata.Log, skipBacklog bool) (monitor.State, error) { + storedState, err := s.GetMonitorState(log) + if err == nil { + return storedState, ErrorMonitorStateExists + } + + key, err := x509.MarshalPKIXPublicKey(log.Key.Public) + if err != nil { + return monitor.State{}, err + 
} + cli, err := client.New(string(log.URL), &http.Client{}, jsonclient.Options{PublicKeyDER: key}) + if err != nil { + return monitor.State{}, err + } + + sctx, cancel := context.WithTimeout(ctx, s.Config.HTTPTimeout) + defer cancel() + sth, err := cli.GetSTH(sctx) + if err != nil { + return monitor.State{}, err + } + id, _ := log.Key.ID() + sth.LogID = id + + state := monitor.State{SignedTreeHead: *sth} + if skipBacklog { + state.NextIndex = sth.TreeSize + } + return state, s.SetMonitorState(id, state) +} + +func (s *Storage) SetMonitorState(logID [sha256.Size]byte, state monitor.State) error { + return ioutil.CommitJSON(s.MonitorStateFile(logID), state) +} + +func (s *Storage) GetMonitorState(log metadata.Log) (monitor.State, error) { + id, err := log.Key.ID() + if err != nil { + return monitor.State{}, err + } + + state := monitor.State{} + return state, ioutil.ReadJSON(s.MonitorStateFile(id), &state) +} diff --git a/pkg/submission/submission.go b/pkg/submission/submission.go new file mode 100644 index 0000000..d33a49d --- /dev/null +++ b/pkg/submission/submission.go @@ -0,0 +1,104 @@ +// Package submission provides creation and opening of a node's legitimately +// issued certificate chains. A submission is composed of a name, a message +// authentication code, and one or more certificate chains in PEM format. +// +// Note: this package makes no attempt to parse any certificate chain. In other +// words, each certificate chain is treated as an opaque list of bytes. +package submission + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + + "rgdd.se/silent-ct/pkg/policy" +) + +const ( + Separator = "silent-ct:separator\n" +) + +type Submission []byte + +func New(node policy.Node, data [][]byte) (Submission, error) { + mac, err := node.HMAC(message(data)) + if err != nil { + return nil, fmt.Errorf("hmac: %v", err) + } + + buf := bytes.NewBuffer(nil) + buf.WriteString(fmt.Sprintf("%s %x\n", node.Name, mac[:])) + buf.Write(message(data)) + return buf.Bytes(), nil +} + +func (s *Submission) Peek() (name string, err error) { + name, _, _, err = s.split() + return +} + +func (s *Submission) Open(node policy.Node) ([][]byte, error) { + name, gotMAC, msg, err := s.split() + if err != nil { + return nil, err + } + if name != node.Name { + return nil, fmt.Errorf("wrong node name %s", name) + } + wantMAC, err := node.HMAC(msg) + if err != nil { + return nil, fmt.Errorf("hmac: %v", err) + } + if !hmac.Equal(gotMAC[:], wantMAC[:]) { + return nil, fmt.Errorf("hmac: mismatch") + } + + return bytes.Split(msg, []byte(Separator)), nil +} + +func (s *Submission) split() (name string, mac [sha256.Size]byte, msg []byte, err error) { + i := bytes.IndexByte(*s, '\n') + if i == -1 { + return name, mac, msg, fmt.Errorf("no authorization line") + } + authLine := (*s)[:i] + msg = (*s)[i+1:] + + split := strings.Split(string(authLine), " ") + if len(split) != 2 { + return name, mac, msg, fmt.Errorf("invalid authorization line") + } + b, err := hex.DecodeString(split[1]) + if err != nil { + return name, mac, msg, fmt.Errorf("mac: %v", err) + } + if len(b) != len(mac) { + return name, mac, msg, fmt.Errorf("mac: length must be %d bytes", len(mac)) + } + + name = split[0] + copy(mac[:], b) + return +} + +func message(data [][]byte) []byte { + if len(data) == 0 { + return []byte{} + } + + buf := bytes.NewBuffer(data[0]) + for i, d := range data[1:] { + prev := data[i] + if prev[len(prev)-1] != '\n' { + buf.Write([]byte("\n")) + } + + buf.Write([]byte(Separator)) + buf.Write(d) + 
}
+
+	return buf.Bytes()
+}
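Usage sketch (not part of the commit): the following is a minimal round-trip of the submission format introduced above, using the APIs added in this diff (policy.NewNode, submission.New, Submission.Peek, Submission.Open). The node name, secret, URL, domain list, and PEM bytes are placeholder values; the submission package treats each chain as opaque bytes, so no real certificate is needed for the sketch.

package main

import (
	"fmt"
	"log"

	"rgdd.se/silent-ct/pkg/policy"
	"rgdd.se/silent-ct/pkg/submission"
)

func main() {
	// Placeholder node configuration; in practice this comes from the policy file.
	node, err := policy.NewNode("node-a", "example-secret", "https://node-a.example.org/", []string{"www.example.org"})
	if err != nil {
		log.Fatal(err)
	}

	// One or more PEM-encoded certificate chains, treated as opaque bytes.
	chains := [][]byte{[]byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n")}

	// The node packs its chains together with an HMAC line derived from its secret.
	s, err := submission.New(node, chains)
	if err != nil {
		log.Fatal(err)
	}

	// The collector peeks at the claimed node name, then verifies and unpacks.
	name, err := s.Peek()
	if err != nil {
		log.Fatal(err)
	}
	opened, err := s.Open(node)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("verified %d chain(s) from %s\n", len(opened), name)
}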