From d42630dd8bf46aaf126ba90c6e9fcab0cdb84841 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 3 Jun 2024 12:52:38 -0700 Subject: [PATCH 01/43] Implement certificate exchange This implements a basic certificate exchange protocol (which still needs tests and is probably slightly broken at the moment). Importantly, it's missing the ability to fetch power table deltas for validating future instances (beyond the latest certificate). My plan is to implement this as a somewhat separate protocol (likely re-using a lot of the same machinery). However: 1. That protocol is only needed for observer nodes. Active participants in the network will follow the EC chain and will learn these power tables through the EC chain. 2. That protocol won't need as much guessing because we'll _know_ which power tables should be available given the latest certificate we've received. The large remaining TODOs are tests and design documentation. The entire protocol has been in constant flux so... I'm sure there are some inconsistencies... --- certexchange/client.go | 142 +++++++++++++++ certexchange/gen.go | 239 ++++++++++++++++++++++++ certexchange/polling/peerTracker.go | 269 ++++++++++++++++++++++++++++ certexchange/polling/poller.go | 133 ++++++++++++++ certexchange/polling/predictor.go | 108 +++++++++++ certexchange/polling/subscriber.go | 238 ++++++++++++++++++++++++ certexchange/protocol.go | 28 +++ certexchange/server.go | 125 +++++++++++++ gen/main.go | 9 + go.mod | 2 +- 10 files changed, 1292 insertions(+), 1 deletion(-) create mode 100644 certexchange/client.go create mode 100644 certexchange/gen.go create mode 100644 certexchange/polling/peerTracker.go create mode 100644 certexchange/polling/poller.go create mode 100644 certexchange/polling/predictor.go create mode 100644 certexchange/polling/subscriber.go create mode 100644 certexchange/protocol.go create mode 100644 certexchange/server.go diff --git a/certexchange/client.go b/certexchange/client.go new file mode 100644 index 00000000..88a528c9 --- /dev/null +++ b/certexchange/client.go @@ -0,0 +1,142 @@ +package certexchange + +import ( + "bufio" + "context" + "fmt" + "io" + "runtime/debug" + "time" + + "github.com/filecoin-project/go-f3" + "github.com/filecoin-project/go-f3/certs" + "github.com/filecoin-project/go-f3/gpbft" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" +) + +// We've estimated the max power table size to be less than 1MiB: +// +// 1. For 10k participants. +// 2. <100 bytes per entry (key + id + power) +const maxPowerTableSize = 1024 * 1024 + +// Client is a libp2p certificate exchange client for requesting finality certificates from specific +// peers. +type Client struct { + Host host.Host + NetworkName gpbft.NetworkName + RequestTimeout time.Duration + + Log f3.Logger +} + +func resetOnCancel(ctx context.Context, s network.Stream) func() { + errCh := make(chan error, 1) + cancel := context.AfterFunc(ctx, func() { + errCh <- s.Reset() + close(errCh) + }) + return func() { + if cancel() { + _ = s.Reset() + } else { + <-errCh + } + } +} + +func (c *Client) withDeadline(ctx context.Context) (context.Context, context.CancelFunc) { + if c.RequestTimeout > 0 { + return context.WithTimeout(ctx, c.RequestTimeout) + } + return ctx, func() {} +} + +// Request finality certificates from the specified peer. Returned finality certificates start at +// the requested instance number and are sequential, but are otherwise unvalidated. 
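+//
+// A rough usage sketch (the host, peer, and logger here are placeholders, not
+// part of this package):
+//
+//	c := &Client{Host: h, NetworkName: nn, RequestTimeout: 10 * time.Second, Log: log}
+//	head, certCh, err := c.Request(ctx, p, &Request{FirstInstance: 42, Limit: 8})
+//	if err != nil {
+//		return err
+//	}
+//	for cert := range certCh {
+//		// validate each certificate before trusting it
+//	}
+//	_ = head.PendingInstance // the peer's next instance to be finalized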
+func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *ResponseHeader, _ch <-chan *certs.FinalityCertificate, _err error) { + defer func() { + if perr := recover(); perr != nil { + _err = fmt.Errorf("panicked requesting certificates from peer %s: %v\n%s", p, perr, string(debug.Stack())) + c.Log.Error(_err) + } + }() + + ctx, cancel := c.withDeadline(ctx) + defer cancel() + + proto := FetchProtocolName(c.NetworkName) + stream, err := c.Host.NewStream(ctx, p, proto) + if err != nil { + return nil, nil, err + } + defer resetOnCancel(ctx, stream)() + + if deadline, ok := ctx.Deadline(); ok { + if err := stream.SetDeadline(deadline); err != nil { + return nil, nil, err + } + } + + br := &io.LimitedReader{R: bufio.NewReader(stream), N: 100} + bw := bufio.NewWriter(stream) + + if err := req.MarshalCBOR(bw); err != nil { + c.Log.Debugf("failed to marshal certificate exchange request to peer %s: %w", p, err) + return nil, nil, err + } + if err := bw.Flush(); err != nil { + return nil, nil, err + } + if err := stream.CloseWrite(); err != nil { + return nil, nil, err + } + + var resp ResponseHeader + if req.IncludePowerTable { + br.N = maxPowerTableSize + } + err = resp.UnmarshalCBOR(br) + if err != nil { + c.Log.Debugf("failed to unmarshal certificate exchange response header from peer %s: %w", p, err) + return nil, nil, err + } + + ch := make(chan *certs.FinalityCertificate, 1) + // copy this in case the caller decides to re-use the request object... + request := *req + go func() { + defer func() { + if perr := recover(); perr != nil { + c.Log.Errorf("panicked while receiving certificates from peer %s: %v\n%s", p, perr, string(debug.Stack())) + } + }() + defer close(ch) + for i := uint64(0); request.Limit == 0 || i < request.Limit; i++ { + cert := new(certs.FinalityCertificate) + + // We'll read at most 1MiB per certificate. They generally shouldn't be that + // large, but large power deltas could get close. + br.N = maxPowerTableSize + err := cert.UnmarshalCBOR(br) + if err != nil { + c.Log.Debugf("failed to unmarshal certificate from peer %s: %w", p, err) + return + } + // One quick sanity check. The rest will be validated by the caller. + if cert.GPBFTInstance != request.FirstInstance+i { + c.Log.Warnf("received out-of-order certificate from peer %s", p) + return + } + + select { + case <-ctx.Done(): + return + case ch <- cert: + } + } + }() + return &resp, ch, nil +} diff --git a/certexchange/gen.go b/certexchange/gen.go new file mode 100644 index 00000000..52ceb479 --- /dev/null +++ b/certexchange/gen.go @@ -0,0 +1,239 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
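+//
+// Hand-worked example of the tuple encoding used below: Request{FirstInstance: 5,
+// Limit: 16, IncludePowerTable: true} marshals to the four bytes 0x83 0x05 0x10 0xf5
+// (a 3-element array header, two small unsigned integers, and the CBOR "true"
+// simple value, major type 7 value 21).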
+ +package certexchange + +import ( + "fmt" + "io" + "math" + "sort" + + gpbft "github.com/filecoin-project/go-f3/gpbft" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufRequest = []byte{131} + +func (t *Request) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufRequest); err != nil { + return err + } + + // t.FirstInstance (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.FirstInstance)); err != nil { + return err + } + + // t.Limit (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Limit)); err != nil { + return err + } + + // t.IncludePowerTable (bool) (bool) + if err := cbg.WriteBool(w, t.IncludePowerTable); err != nil { + return err + } + return nil +} + +func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { + *t = Request{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.FirstInstance (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.FirstInstance = uint64(extra) + + } + // t.Limit (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Limit = uint64(extra) + + } + // t.IncludePowerTable (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.IncludePowerTable = false + case 21: + t.IncludePowerTable = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufResponseHeader = []byte{130} + +func (t *ResponseHeader) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufResponseHeader); err != nil { + return err + } + + // t.PendingInstance (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PendingInstance)); err != nil { + return err + } + + // t.PowerTable ([]gpbft.PowerEntry) (slice) + if len(t.PowerTable) > 8192 { + return xerrors.Errorf("Slice value in field t.PowerTable was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.PowerTable))); err != nil { + return err + } + for _, v := range t.PowerTable { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + + } + return nil +} + +func (t *ResponseHeader) UnmarshalCBOR(r io.Reader) (err error) { + *t = ResponseHeader{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + 
return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PendingInstance (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PendingInstance = uint64(extra) + + } + // t.PowerTable ([]gpbft.PowerEntry) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 8192 { + return fmt.Errorf("t.PowerTable: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.PowerTable = make([]gpbft.PowerEntry, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err + + { + + if err := t.PowerTable[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PowerTable[i]: %w", err) + } + + } + + } + } + return nil +} diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go new file mode 100644 index 00000000..54077160 --- /dev/null +++ b/certexchange/polling/peerTracker.go @@ -0,0 +1,269 @@ +package polling + +import ( + "cmp" + "container/heap" + "math/rand/v2" + "slices" + + "github.com/libp2p/go-libp2p/core/peer" +) + +const ( + hitMissSlidingWindow = 10 + maxBackoffExponent = 8 + // The default number of requests to make. + defaultRequests = 8 + // The minimum number of requests to make. If we pick fewer than this number of peers, we'll + // randomly sample known peers to find more. + minRequests = 4 + // The maximum number of requests to make, even if all of our peers appear to be unreliable. + maxRequests = 32 +) + +type peerState int + +const ( + peerEvil peerState = iota - 1 + peerInactive + peerActive +) + +// TODO: Track latency and connectedness. +type peerRecord struct { + sequentialFailures int + + // Sliding windows of hits/misses (0-10 each). If either would exceed 10, we subtract 1 from + // both (where 0 is the floor). + // + // - We use sliding windows to give more weight to recent hits/misses. + // - We don't use a simple weighted moving average because that doesn't track how "sure" we + // are of the measurement. + hits, misses int + + state peerState +} + +type backoffHeap []*backoffRecord + +type backoffRecord struct { + peer peer.ID + // Delay until a round. XXX maybe do this in terms of wall-clock time? + delayUntil int +} + +type peerTracker struct { + // TODO: garbage collect this. + peers map[peer.ID]*peerRecord + // TODO: Limit the number of active peers. + active []peer.ID + backoff backoffHeap + round int +} + +func (r *peerRecord) Cmp(other *peerRecord) int { + if c := cmp.Compare(r.state, other.state); c != 0 { + return c + } + rateA, countA := r.hitRate() + rateB, countB := other.hitRate() + + if c := cmp.Compare(rateA, rateB); c != 0 { + return c + } + if c := cmp.Compare(countA, countB); c != 0 { + return c + } + return 0 +} + +// Len implements heap.Interface. +func (b *backoffHeap) Len() int { + return len(*b) +} + +// Less implements heap.Interface. +func (b *backoffHeap) Less(i int, j int) bool { + return (*b)[i].delayUntil < (*b)[j].delayUntil +} + +// Pop implements heap.Interface. +func (b *backoffHeap) Pop() any { + return (*b)[len(*b)-1] +} + +// Push implements heap.Interface. +func (b *backoffHeap) Push(x any) { + *b = append(*b, x.(*backoffRecord)) +} + +// Swap implements heap.Interface. 
+func (b *backoffHeap) Swap(i int, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] +} + +// Records a failed request and returns how many rounds we should avoid picking this peer for. +func (r *peerRecord) recordFailure() int { + r.sequentialFailures++ + r.state = peerInactive + return 1 << min(r.sequentialFailures, maxBackoffExponent) +} + +func (r *peerRecord) recordHit() { + r.sequentialFailures = 0 + if r.hits < hitMissSlidingWindow { + r.hits++ + } else if r.misses > 0 { + r.misses-- + } +} + +func (r *peerRecord) recordMiss() { + r.sequentialFailures = 0 + if r.misses < hitMissSlidingWindow { + r.misses++ + } else if r.hits > 0 { + r.hits-- + } +} + +// Return the hit rate and the +func (r *peerRecord) hitRate() (float64, int) { + total := r.hits + r.misses + // set the default rate such that we we ask `defaultRequests` peers by default. + rate := float64(1) / defaultRequests + if total > 0 { + rate = float64(r.hits) / float64(total) + } + return rate, total + +} + +func (t *peerTracker) getOrCreate(p peer.ID) *peerRecord { + r, ok := t.peers[p] + if !ok { + r = new(peerRecord) + t.peers[p] = r + } + return r +} + +func (t *peerTracker) recordInvalid(p peer.ID) { + t.getOrCreate(p).state = peerEvil +} + +func (t *peerTracker) recordMiss(p peer.ID) { + t.getOrCreate(p).recordMiss() +} + +func (t *peerTracker) recordFailure(p peer.ID) { + // When we fail to query a peer, backoff that peer. + r := &backoffRecord{ + peer: p, + delayUntil: t.round + t.getOrCreate(p).recordFailure(), + } + heap.Push(&t.backoff, r) +} + +func (t *peerTracker) recordHit(p peer.ID) { + t.getOrCreate(p).recordHit() +} + +func (t *peerTracker) makeActive(p peer.ID) { + r := t.getOrCreate(p) + if r.state != peerInactive { + return + } + r.state = peerActive + t.active = append(t.active, p) +} + +func (t *peerTracker) peerSeen(p peer.ID) { + if _, ok := t.peers[p]; !ok { + t.peers[p] = &peerRecord{state: peerActive} + t.active = append(t.active, p) + } +} + +// Suggest a number of peers from which to request new certificates based on their historical +// record. +// +// TODO: Add a multiplier if we're not making progress. +func (t *peerTracker) suggestPeers() []peer.ID { + // XXX: this should be a param. + const targetProbability = 1.1 + + // Advance the round and move peers from backoff to active, if necessary. + t.round++ + for t.backoff.Len() > 0 { + r := t.backoff[0] + if r.delayUntil > t.round { + break + } + heap.Pop(&t.backoff) + t.makeActive(r.peer) + } + + // Sort from best to worst. + slices.SortFunc(t.active, func(a, b peer.ID) int { + return t.getOrCreate(b).Cmp(t.getOrCreate(a)) + }) + // Trim off any inactive/evil peers from the end, they'll be sorted last. + for l := len(t.active); l > 0 && t.getOrCreate(t.active[l-1]).state != peerActive; l-- { + t.active = t.active[:l] + } + var prob float64 + var peerCount int + for _, p := range t.active { + hitRate, _ := t.getOrCreate(p).hitRate() + // If we believe this and all the rest of the peers are useless, choose the rest of + // the peers randomly. + if hitRate == 0 { + break + } + + prob += hitRate + peerCount++ + if peerCount >= maxRequests { + break + } + // Keep going till we're 110% sure. + if prob >= targetProbability { + break + } + } + + chosen := t.active[:peerCount:peerCount] + + if peerCount == len(t.active) { + // We've chosen all peers, nothing else we can do. + } else if prob < targetProbability { + // If we failed to reach the target probability, choose randomly from the remaining + // peers. 
+ chosen = append(chosen, choose(t.active[peerCount:], maxRequests-peerCount)...) + } else if peerCount < minRequests { + // If we reached the target probability but didn't reach the number of minimum + // requests, pick a few more peers to fill us out. + chosen = append(chosen, choose(t.active[peerCount:], minRequests-peerCount)...) + } + + return chosen +} + +var _ heap.Interface = new(backoffHeap) + +func choose[T any](items []T, count int) []T { + if len(items) <= count { + return items + } + + // Knuth 3.4.2S. Could use rand.Perm, but that would allocate a large array. + // There are more efficient algorithms for small sample sizes, but they're complex. + chosen := make([]T, 0, count) + for t := 0; len(chosen) < cap(chosen); t++ { + if rand.IntN(len(items)-t) < cap(chosen)-len(chosen) { + chosen = append(chosen, items[t]) + } + } + return chosen +} diff --git a/certexchange/polling/poller.go b/certexchange/polling/poller.go new file mode 100644 index 00000000..f979ea4e --- /dev/null +++ b/certexchange/polling/poller.go @@ -0,0 +1,133 @@ +package polling + +import ( + "context" + + "github.com/filecoin-project/go-f3/certexchange" + "github.com/filecoin-project/go-f3/certs" + "github.com/filecoin-project/go-f3/certstore" + "github.com/filecoin-project/go-f3/gpbft" + "github.com/libp2p/go-libp2p/core/peer" +) + +// A Poller will poll specific peers on-demand to try to advance the current GPBFT instance. +type Poller struct { + *certexchange.Client + + Store *certstore.Store + SignatureVerifier gpbft.Verifier + PowerTable gpbft.PowerEntries + NextInstance uint64 +} + +// NewPoller constructs a new certificate poller and initializes it from the passed certificate store. +func NewPoller(ctx context.Context, client *certexchange.Client, store *certstore.Store, verifier gpbft.Verifier) (*Poller, error) { + var nextInstance uint64 + if latest := store.Latest(); latest != nil { + nextInstance = latest.GPBFTInstance + 1 + } + pt, err := store.GetPowerTable(ctx, nextInstance) + if err != nil { + return nil, err + } + return &Poller{ + Client: client, + Store: store, + SignatureVerifier: verifier, + NextInstance: nextInstance, + PowerTable: pt, + }, nil +} + +type PollResult int + +const ( + PollMiss PollResult = iota + PollHit + PollFailed + PollIllegal +) + +// CatchUp attempts to advance to the latest instance from the certificate store without making any +// network requests. It returns the number of instances we advanced. +func (p *Poller) CatchUp(ctx context.Context) (uint64, error) { + latest := p.Store.Latest() + if latest == nil { + return 0, nil + } + + next := latest.GPBFTInstance + 1 + progress := next - p.NextInstance + + if progress == 0 { + return 0, nil + } + + pt, err := p.Store.GetPowerTable(ctx, next) + if err != nil { + return 0, err + } + p.PowerTable = pt + p.NextInstance = next + return progress, nil +} + +// Poll polls a specific peer, possibly multiple times, in order to advance the instance as much as +// possible. It returns: +// +// 1. A PollResult indicating the outcome: miss, hit, failed, illegal. +// 2. An error if something went wrong internally (e.g., the certificate store returned an error). +func (p *Poller) Poll(ctx context.Context, peer peer.ID) (PollResult, error) { + var result PollResult + for { + // Requests take time, so always try to catch-up between requests in case there has + // been some "local" action from the GPBFT instance. 
+ if _, err := p.CatchUp(ctx); err != nil { + return PollFailed, err + } + + resp, ch, err := p.Request(ctx, peer, &certexchange.Request{ + FirstInstance: p.NextInstance, + Limit: maxRequestLength, + IncludePowerTable: false, + }) + if err != nil { + return PollFailed, nil + } + + // If they're caught up, record it as a hit. Otherwise, if they have nothing + // to give us, move on. + if resp.PendingInstance >= p.NextInstance { + result = PollHit + } + + received := 0 + for cert := range ch { + // TODO: consider batching verification, it's slightly faster. + next, _, pt, err := certs.ValidateFinalityCertificates( + p.SignatureVerifier, p.NetworkName, p.PowerTable, p.NextInstance, nil, + *cert, + ) + if err != nil { + return PollIllegal, nil + } + if err := p.Store.Put(ctx, cert); err != nil { + return PollHit, err + } + p.NextInstance = next + p.PowerTable = pt + received++ + } + + // Try again if they're claiming to have more instances (and gave me at + // least one). + if resp.PendingInstance <= p.NextInstance { + return result, nil + } else if received == 0 { + // If they give me no certificates but claim to have more, treat this as a + // failure (could be a connection failure, etc). + return PollFailed, nil + } + + } +} diff --git a/certexchange/polling/predictor.go b/certexchange/polling/predictor.go new file mode 100644 index 00000000..8b570a5f --- /dev/null +++ b/certexchange/polling/predictor.go @@ -0,0 +1,108 @@ +package polling + +import ( + "time" +) + +const ( + maxBackoffMultiplier = 10 + minExploreDistance = 100 * time.Millisecond +) + +func newPredictor(targetAccuracy float64, minInterval, defaultInterval, maxInterval time.Duration) *predictor { + return &predictor{ + minInterval: minInterval, + maxInterval: maxInterval, + interval: defaultInterval, + exploreDistance: defaultInterval / 2, + } +} + +// An interval predictor that tries to predict the time between instances. It can't predict the time +// an instance will be available, but it'll keep adjusting the interval until we receive one +// instance per interval. +type predictor struct { + minInterval, maxInterval time.Duration + + next time.Time + interval time.Duration + wasIncreasing bool + exploreDistance time.Duration + + backoff time.Duration +} + +// Update the predictor. The one argument indicates how many certificates we received since the last +// update. +// +// - 2+ -> interval is too long. +// - 1 -> interval is perfect. +// - 0 -> interval is too short. +// +// We don't actually know the _offset_... but whatever. We can keep up +/- one instance and that's +// fine (especially because of the power table lag, etc.). +func (p *predictor) update(progress uint64) time.Duration { + if p.backoff > 0 { + if progress > 0 { + p.backoff = 0 + } + } else if progress != 1 { + // If we've made too much progress (interval too long) or made no progress (interval + // too short), explore to find the right interval. + + // We halve the explore distance when we switch directions and double it whenever we + // need to keep moving in the same direction repeatedly. + if p.wasIncreasing == (progress > 1) { + p.exploreDistance /= 2 + } else { + p.exploreDistance *= 2 + } + + // Make sure the explore distance doesn't get too short/long. + if p.exploreDistance < minExploreDistance { + p.exploreDistance = minExploreDistance + } else if p.exploreDistance > p.maxInterval/2 { + p.exploreDistance = p.maxInterval / 2 + } + + // Then update the interval. + if progress == 0 { + // If we fail to make progress, enter "backoff" mode. 
We'll leave backoff + // mode next time we receive a certificate. Otherwise, we'd end up quickly + // skewing our belief of the correct interval e.g., due to a skipped + // instance. + p.backoff = p.interval + p.interval += p.exploreDistance + p.wasIncreasing = true + } else { + p.interval -= p.exploreDistance + p.wasIncreasing = false + } + + // Clamp between min/max + if p.interval < p.minInterval { + p.interval = p.minInterval + } else if p.interval > p.maxInterval { + p.interval = p.maxInterval + } + } + + // Apply either the backoff or predicted the interval. + if p.backoff > 0 { + p.backoff = min(2*p.backoff, maxBackoffMultiplier*p.maxInterval) + p.next = p.next.Add(p.backoff) + } else { + p.next = p.next.Add(p.interval) + } + + // Polling takes time. If we run behind, predict that we should poll again immediately. We + // enforce a minimum interval above so this shouldn't be too often. + now := time.Now() + prediction := p.next.Sub(now) + if prediction < 0 { + p.next = now + return 0 + } + + return prediction +} diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go new file mode 100644 index 00000000..798b698a --- /dev/null +++ b/certexchange/polling/subscriber.go @@ -0,0 +1,238 @@ +package polling + +import ( + "context" + "fmt" + "slices" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/filecoin-project/go-f3/certexchange" + "github.com/filecoin-project/go-f3/certstore" + "github.com/filecoin-project/go-f3/gpbft" +) + +const maxRequestLength = 256 + +// A polling Subscriber will continuously poll the network for new finality certificates. +type Subscriber struct { + certexchange.Client + + Store *certstore.Store + SignatureVerifier gpbft.Verifier + InitialPollInterval time.Duration + MaximumPollInterval time.Duration + MinimumPollInterval time.Duration + + peerTracker peerTracker + + wg sync.WaitGroup + + ctx context.Context + stop context.CancelFunc +} + +func (s *Subscriber) Start() error { + s.wg.Add(1) + s.ctx, s.stop = context.WithCancel(context.Background()) + go func() { + defer s.wg.Done() + + ctx, cancel := context.WithCancel(s.ctx) + defer cancel() + if err := s.run(ctx); err != nil && s.ctx.Err() != nil { + s.Log.Errorf("polling certificate exchange subscriber exited early: %w", err) + } + }() + + return nil +} + +func (s *Subscriber) Stop() error { + s.stop() + s.wg.Wait() + + return nil +} + +// Discover new peers. +func (s *Subscriber) libp2pDiscover(ctx context.Context) (<-chan peer.ID, error) { + out := make(chan peer.ID, 256) + discoveryEvents, err := s.Host.EventBus().Subscribe([]any{ + new(event.EvtPeerIdentificationCompleted), + new(event.EvtPeerProtocolsUpdated), + }) + if err != nil { + return nil, err + } + + targetProtocol := certexchange.FetchProtocolName(s.NetworkName) + + // Mark already connected peers as "seen". 
+ for _, p := range s.Host.Network().Peers() { + if proto, err := s.Host.Peerstore().FirstSupportedProtocol(p, targetProtocol); err == nil && proto == targetProtocol { + s.peerTracker.peerSeen(p) + } + } + + // Then start listening for new peers + s.wg.Add(1) + go func() { + defer s.wg.Done() + defer discoveryEvents.Close() + for { + var ( + evt any + ok bool + ) + select { + case evt, ok = <-discoveryEvents.Out(): + case <-ctx.Done(): + } + if !ok { + return + } + + var protos []protocol.ID + var peer peer.ID + switch e := evt.(type) { + case *event.EvtPeerIdentificationCompleted: + protos = e.Protocols + peer = e.Peer + case *event.EvtPeerProtocolsUpdated: + protos = e.Added + peer = e.Peer + default: + continue + } + if slices.Contains(protos, targetProtocol) { + // If the channel is full, ignore newly discovered peers. We + // likely have enough anyways and we'll drain the channel + // eventually. + select { + case out <- peer: + default: + } + } + } + }() + return out, nil +} + +func (s *Subscriber) run(ctx context.Context) error { + timer := time.NewTimer(s.InitialPollInterval) + defer timer.Stop() + + discoveredPeers, err := s.libp2pDiscover(ctx) + if err != nil { + return err + } + + predictor := newPredictor( + 0.05, + s.MinimumPollInterval, + s.InitialPollInterval, + s.MaximumPollInterval, + ) + + poller, err := NewPoller(ctx, &s.Client, s.Store, s.SignatureVerifier) + if err != nil { + return err + } + + for ctx.Err() == nil { + var err error + + // Always handle newly discovered peers and new certificates from the certificate + // store _first_. Then check the timer to see if we should poll. + select { + case p := <-discoveredPeers: + s.peerTracker.peerSeen(p) + default: + select { + case p := <-discoveredPeers: + s.peerTracker.peerSeen(p) + case <-timer.C: + // First, see if we made progress locally. If we have, update + // interval prediction based on that local progress. If our interval + // was accurate, we'll keep predicting the same interval and we'll + // never make any network requests. If we stop making local + // progress, we'll start making network requests again. + var progress uint64 + progress, err = poller.CatchUp(ctx) + if err != nil { + break + } + if progress > 0 { + timer.Reset(predictor.update(progress)) + break + } + + progress, err = s.poll(ctx, poller) + if err != nil { + break + } + timer.Reset(predictor.update(progress)) + case <-ctx.Done(): + return ctx.Err() + } + } + + if err != nil { + return err + } + } + return ctx.Err() +} + +func (s *Subscriber) poll(ctx context.Context, poller *Poller) (uint64, error) { + var ( + misses []peer.ID + hits []peer.ID + ) + + start := poller.NextInstance + for _, peer := range s.peerTracker.suggestPeers() { + oldInstance := poller.NextInstance + res, err := poller.Poll(ctx, peer) + if err != nil { + return poller.NextInstance - start, err + } + // If we manage to advance, all old "hits" are actually misses. + if oldInstance < poller.NextInstance { + misses = append(misses, hits...) + hits = hits[:0] + } + + switch res { + case PollMiss: + misses = append(misses, peer) + case PollHit: + hits = append(hits, peer) + case PollFailed: + s.peerTracker.recordFailure(peer) + case PollIllegal: + s.peerTracker.recordInvalid(peer) + default: + panic(fmt.Sprintf("unexpected polling.PollResult: %#v", res)) + } + } + + // If we've made progress, record hits/misses. Otherwise, we just have to assume that we + // asked too soon. 
+ progress := poller.NextInstance - start + if progress > 0 { + for _, p := range misses { + s.peerTracker.recordMiss(p) + } + for _, p := range hits { + s.peerTracker.recordHit(p) + } + } + + return progress, nil +} diff --git a/certexchange/protocol.go b/certexchange/protocol.go new file mode 100644 index 00000000..0ea08299 --- /dev/null +++ b/certexchange/protocol.go @@ -0,0 +1,28 @@ +package certexchange + +import ( + "github.com/filecoin-project/go-f3/gpbft" + "github.com/libp2p/go-libp2p/core/protocol" +) + +func FetchProtocolName(nn gpbft.NetworkName) protocol.ID { + return protocol.ID("/f3/certexch/get/1/" + string(nn)) +} + +type Request struct { + // First instance to fetch. + FirstInstance uint64 + // Max number of instances to fetch. The server may respond with fewer certificates than + // requested, even if more are available. + Limit uint64 + // Include the full power table needed to validate the first finality certificate. + // Checked by the user against their last finality certificate. + IncludePowerTable bool +} + +type ResponseHeader struct { + // The next instance to be finalized. This is 0 when no instances have been finalized. + PendingInstance uint64 + // Power table, if requested, or empty. + PowerTable []gpbft.PowerEntry +} diff --git a/certexchange/server.go b/certexchange/server.go new file mode 100644 index 00000000..483f0eb6 --- /dev/null +++ b/certexchange/server.go @@ -0,0 +1,125 @@ +package certexchange + +import ( + "bufio" + "context" + "errors" + "fmt" + "runtime/debug" + "time" + + "github.com/filecoin-project/go-f3" + "github.com/filecoin-project/go-f3/certstore" + "github.com/filecoin-project/go-f3/gpbft" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" +) + +const maxResponseLen = 256 + +// Server is libp2p a certificate exchange server. +type Server struct { + // Request timeouts. If non-zero, requests will be canceled after the specified duration. + RequestTimeout time.Duration + NetworkName gpbft.NetworkName + Host host.Host + Store *certstore.Store + Log f3.Logger + + cancel context.CancelFunc +} + +func (s *Server) withDeadline(ctx context.Context) (context.Context, context.CancelFunc) { + if s.RequestTimeout > 0 { + return context.WithTimeout(ctx, s.RequestTimeout) + } + return ctx, func() {} +} + +func (s *Server) handleRequest(ctx context.Context, stream network.Stream) (_err error) { + defer func() { + if perr := recover(); perr != nil { + _err = fmt.Errorf("panicked in server response: %v", perr) + s.Log.Errorf("%s\n%s", string(debug.Stack())) + } + }() + + if deadline, ok := ctx.Deadline(); ok { + if err := stream.SetDeadline(deadline); err != nil { + return err + } + } + + br := bufio.NewReader(stream) + bw := bufio.NewWriter(stream) + + // Request has no variable-length fields, so we don't need a limited reader. 
+ var req Request + if err := req.UnmarshalCBOR(br); err != nil { + s.Log.Debugf("failed to read request from stream: %w", err) + return err + } + + limit := req.Limit + if limit == 0 || limit > maxResponseLen { + limit = maxResponseLen + } + var resp ResponseHeader + if latest := s.Store.Latest(); latest != nil { + resp.PendingInstance = latest.GPBFTInstance + 1 + } + + if resp.PendingInstance <= req.FirstInstance { + pt, err := s.Store.GetPowerTable(ctx, req.FirstInstance) + if err != nil { + s.Log.Errorf("failed to load power table: %w", err) + return err + } + resp.PowerTable = pt + } + + if err := resp.MarshalCBOR(bw); err != nil { + s.Log.Debugf("failed to write header to stream: %w", err) + return err + } + + if resp.PendingInstance > req.FirstInstance { + certs, err := s.Store.GetRange(ctx, req.FirstInstance, req.FirstInstance+limit) + if err == nil || errors.Is(err, certstore.ErrCertNotFound) { + for i := range certs { + if err := certs[i].MarshalCBOR(bw); err != nil { + s.Log.Debugf("failed to write certificate to stream: %w", err) + return err + } + } + } else { + s.Log.Errorf("failed to load finality certificates: %w", err) + } + } + return bw.Flush() +} + +// Start the server. +func (s *Server) Start() error { + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + s.Host.SetStreamHandler(FetchProtocolName(s.NetworkName), func(stream network.Stream) { + ctx, cancel := s.withDeadline(ctx) + defer cancel() + + if err := s.handleRequest(ctx, stream); err != nil { + _ = stream.Reset() + } else { + _ = stream.Close() + } + + }) + return nil +} + +// Stop the server. +func (s *Server) Stop() error { + s.Host.RemoveStreamHandler(FetchProtocolName(s.NetworkName)) + s.cancel() + return nil +} diff --git a/gen/main.go b/gen/main.go index 4fc610f9..2a418bc1 100644 --- a/gen/main.go +++ b/gen/main.go @@ -4,6 +4,7 @@ import ( "fmt" "os" + "github.com/filecoin-project/go-f3/certexchange" "github.com/filecoin-project/go-f3/certs" "github.com/filecoin-project/go-f3/gpbft" gen "github.com/whyrusleeping/cbor-gen" @@ -34,4 +35,12 @@ func main() { fmt.Println(err) os.Exit(1) } + err = gen.WriteTupleEncodersToFile("../certexchange/gen.go", "certexchange", + certexchange.Request{}, + certexchange.ResponseHeader{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } } diff --git a/go.mod b/go.mod index 212389a3..bd815d4d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/filecoin-project/go-f3 -go 1.21 +go 1.22 require ( github.com/Kubuxu/go-broadcast v0.0.0-20240621161059-1a8c90734cd6 From e0083a0777509c645d6b07553ce96aed0abd783a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 09:30:00 -0700 Subject: [PATCH 02/43] revert golang version bump --- certexchange/polling/peerTracker.go | 4 ++-- go.mod | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index 54077160..c7492436 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -3,7 +3,7 @@ package polling import ( "cmp" "container/heap" - "math/rand/v2" + "math/rand" "slices" "github.com/libp2p/go-libp2p/core/peer" @@ -261,7 +261,7 @@ func choose[T any](items []T, count int) []T { // There are more efficient algorithms for small sample sizes, but they're complex. 
chosen := make([]T, 0, count) for t := 0; len(chosen) < cap(chosen); t++ { - if rand.IntN(len(items)-t) < cap(chosen)-len(chosen) { + if rand.Intn(len(items)-t) < cap(chosen)-len(chosen) { chosen = append(chosen, items[t]) } } diff --git a/go.mod b/go.mod index bd815d4d..212389a3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/filecoin-project/go-f3 -go 1.22 +go 1.21 require ( github.com/Kubuxu/go-broadcast v0.0.0-20240621161059-1a8c90734cd6 From adbf3b3c0392be822332f6d6d45af49f6fe770bd Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 09:35:51 -0700 Subject: [PATCH 03/43] certexchange: []PowerEntry -> PowerEntries --- certexchange/protocol.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/certexchange/protocol.go b/certexchange/protocol.go index 0ea08299..8f91f972 100644 --- a/certexchange/protocol.go +++ b/certexchange/protocol.go @@ -24,5 +24,5 @@ type ResponseHeader struct { // The next instance to be finalized. This is 0 when no instances have been finalized. PendingInstance uint64 // Power table, if requested, or empty. - PowerTable []gpbft.PowerEntry + PowerTable gpbft.PowerEntries } From 9435cb5ebb4fb10d98449ed7f8ed93e689661601 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 09:52:31 -0700 Subject: [PATCH 04/43] certexchange: better handle setup errors --- certexchange/polling/subscriber.go | 47 +++++++++++++----------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index 798b698a..76daa5c9 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -39,12 +39,22 @@ type Subscriber struct { func (s *Subscriber) Start() error { s.wg.Add(1) s.ctx, s.stop = context.WithCancel(context.Background()) + + discoveredPeers, err := s.libp2pDiscover(s.ctx) + if err != nil { + return err + } + + poller, err := NewPoller(s.ctx, &s.Client, s.Store, s.SignatureVerifier) + if err != nil { + return err + } + go func() { defer s.wg.Done() + defer s.stop() // in case we return early, cancel everything else. - ctx, cancel := context.WithCancel(s.ctx) - defer cancel() - if err := s.run(ctx); err != nil && s.ctx.Err() != nil { + if err := s.run(s.ctx, discoveredPeers, poller); err != nil && s.ctx.Err() != nil { s.Log.Errorf("polling certificate exchange subscriber exited early: %w", err) } }() @@ -53,8 +63,10 @@ func (s *Subscriber) Start() error { } func (s *Subscriber) Stop() error { - s.stop() - s.wg.Wait() + if s.stop == nil { + s.stop() + s.wg.Wait() + } return nil } @@ -123,15 +135,10 @@ func (s *Subscriber) libp2pDiscover(ctx context.Context) (<-chan peer.ID, error) return out, nil } -func (s *Subscriber) run(ctx context.Context) error { +func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, poller *Poller) error { timer := time.NewTimer(s.InitialPollInterval) defer timer.Stop() - discoveredPeers, err := s.libp2pDiscover(ctx) - if err != nil { - return err - } - predictor := newPredictor( 0.05, s.MinimumPollInterval, @@ -139,14 +146,7 @@ func (s *Subscriber) run(ctx context.Context) error { s.MaximumPollInterval, ) - poller, err := NewPoller(ctx, &s.Client, s.Store, s.SignatureVerifier) - if err != nil { - return err - } - for ctx.Err() == nil { - var err error - // Always handle newly discovered peers and new certificates from the certificate // store _first_. Then check the timer to see if we should poll. 
select { @@ -162,10 +162,9 @@ func (s *Subscriber) run(ctx context.Context) error { // was accurate, we'll keep predicting the same interval and we'll // never make any network requests. If we stop making local // progress, we'll start making network requests again. - var progress uint64 - progress, err = poller.CatchUp(ctx) + progress, err := poller.CatchUp(ctx) if err != nil { - break + return err } if progress > 0 { timer.Reset(predictor.update(progress)) @@ -174,17 +173,13 @@ func (s *Subscriber) run(ctx context.Context) error { progress, err = s.poll(ctx, poller) if err != nil { - break + return err } timer.Reset(predictor.update(progress)) case <-ctx.Done(): return ctx.Err() } } - - if err != nil { - return err - } } return ctx.Err() } From c2a53ec72b53d3cf6c08b5e0f935f11de0264419 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 10:02:23 -0700 Subject: [PATCH 05/43] fix generated files --- certexchange/gen.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/certexchange/gen.go b/certexchange/gen.go index 52ceb479..079b5df1 100644 --- a/certexchange/gen.go +++ b/certexchange/gen.go @@ -143,7 +143,7 @@ func (t *ResponseHeader) MarshalCBOR(w io.Writer) error { return err } - // t.PowerTable ([]gpbft.PowerEntry) (slice) + // t.PowerTable (gpbft.PowerEntries) (slice) if len(t.PowerTable) > 8192 { return xerrors.Errorf("Slice value in field t.PowerTable was too long") } @@ -197,7 +197,7 @@ func (t *ResponseHeader) UnmarshalCBOR(r io.Reader) (err error) { t.PendingInstance = uint64(extra) } - // t.PowerTable ([]gpbft.PowerEntry) (slice) + // t.PowerTable (gpbft.PowerEntries) (slice) maj, extra, err = cr.ReadHeader() if err != nil { From 75692d85afc2ec94da3e04fc216d00c11a645284 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 12:14:57 -0700 Subject: [PATCH 06/43] remove unused parameter --- certexchange/polling/predictor.go | 2 +- certexchange/polling/subscriber.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/certexchange/polling/predictor.go b/certexchange/polling/predictor.go index 8b570a5f..456bd654 100644 --- a/certexchange/polling/predictor.go +++ b/certexchange/polling/predictor.go @@ -9,7 +9,7 @@ const ( minExploreDistance = 100 * time.Millisecond ) -func newPredictor(targetAccuracy float64, minInterval, defaultInterval, maxInterval time.Duration) *predictor { +func newPredictor(minInterval, defaultInterval, maxInterval time.Duration) *predictor { return &predictor{ minInterval: minInterval, maxInterval: maxInterval, diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index 76daa5c9..2321e6a3 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -140,7 +140,6 @@ func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, po defer timer.Stop() predictor := newPredictor( - 0.05, s.MinimumPollInterval, s.InitialPollInterval, s.MaximumPollInterval, From 046577dba75257215917d90d76e11d5c622e973f Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 17:04:05 -0700 Subject: [PATCH 07/43] remove preferential new-peer handling It was necessary when we subscribed to new certificates, but not anymore. 
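
The run loop now reduces to a single select over discovered peers, the
poll timer, and context cancellation, roughly:

    select {
    case p := <-discoveredPeers:
        s.peerTracker.peerSeen(p)
    case <-timer.C:
        // catch up from the certificate store, poll the network if that
        // made no progress, then reset the timer
    case <-ctx.Done():
        return ctx.Err()
    }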
--- certexchange/polling/subscriber.go | 47 +++++++++++++----------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index 2321e6a3..c28a6e40 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -146,38 +146,31 @@ func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, po ) for ctx.Err() == nil { - // Always handle newly discovered peers and new certificates from the certificate - // store _first_. Then check the timer to see if we should poll. select { case p := <-discoveredPeers: s.peerTracker.peerSeen(p) - default: - select { - case p := <-discoveredPeers: - s.peerTracker.peerSeen(p) - case <-timer.C: - // First, see if we made progress locally. If we have, update - // interval prediction based on that local progress. If our interval - // was accurate, we'll keep predicting the same interval and we'll - // never make any network requests. If we stop making local - // progress, we'll start making network requests again. - progress, err := poller.CatchUp(ctx) - if err != nil { - return err - } - if progress > 0 { - timer.Reset(predictor.update(progress)) - break - } - - progress, err = s.poll(ctx, poller) - if err != nil { - return err - } + case <-timer.C: + // First, see if we made progress locally. If we have, update + // interval prediction based on that local progress. If our interval + // was accurate, we'll keep predicting the same interval and we'll + // never make any network requests. If we stop making local + // progress, we'll start making network requests again. + progress, err := poller.CatchUp(ctx) + if err != nil { + return err + } + if progress > 0 { timer.Reset(predictor.update(progress)) - case <-ctx.Done(): - return ctx.Err() + break + } + + progress, err = s.poll(ctx, poller) + if err != nil { + return err } + timer.Reset(predictor.update(progress)) + case <-ctx.Done(): + return ctx.Err() } } return ctx.Err() From a8fdf91b35923c2d469efdcf80a262ede0a09d74 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 17:14:47 -0700 Subject: [PATCH 08/43] remove time from the interval predictor It makes it harder to test. --- certexchange/polling/predictor.go | 17 +++-------------- certexchange/polling/subscriber.go | 19 ++++++++++--------- 2 files changed, 13 insertions(+), 23 deletions(-) diff --git a/certexchange/polling/predictor.go b/certexchange/polling/predictor.go index 456bd654..60bcfd1d 100644 --- a/certexchange/polling/predictor.go +++ b/certexchange/polling/predictor.go @@ -24,7 +24,6 @@ func newPredictor(minInterval, defaultInterval, maxInterval time.Duration) *pred type predictor struct { minInterval, maxInterval time.Duration - next time.Time interval time.Duration wasIncreasing bool exploreDistance time.Duration @@ -88,21 +87,11 @@ func (p *predictor) update(progress uint64) time.Duration { } // Apply either the backoff or predicted the interval. + nextInterval := p.interval if p.backoff > 0 { + nextInterval = p.backoff p.backoff = min(2*p.backoff, maxBackoffMultiplier*p.maxInterval) - p.next = p.next.Add(p.backoff) - } else { - p.next = p.next.Add(p.interval) } + return nextInterval - // Polling takes time. If we run behind, predict that we should poll again immediately. We - // enforce a minimum interval above so this shouldn't be too often. 
- now := time.Now() - prediction := p.next.Sub(now) - if prediction < 0 { - p.next = now - return 0 - } - - return prediction } diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index c28a6e40..7eaa1cde 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -149,7 +149,7 @@ func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, po select { case p := <-discoveredPeers: s.peerTracker.peerSeen(p) - case <-timer.C: + case pollTime := <-timer.C: // First, see if we made progress locally. If we have, update // interval prediction based on that local progress. If our interval // was accurate, we'll keep predicting the same interval and we'll @@ -159,16 +159,17 @@ func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, po if err != nil { return err } - if progress > 0 { - timer.Reset(predictor.update(progress)) - break + // Otherwise, poll the network. + if progress == 0 { + progress, err = s.poll(ctx, poller) + if err != nil { + return err + } } - progress, err = s.poll(ctx, poller) - if err != nil { - return err - } - timer.Reset(predictor.update(progress)) + nextInterval := predictor.update(progress) + nextPollTime := pollTime.Add(nextInterval) + timer.Reset(max(time.Until(nextPollTime), 0)) case <-ctx.Done(): return ctx.Err() } From 139089cbdeacf85c8a54d70db2e70a49efc632ec Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 17:23:21 -0700 Subject: [PATCH 09/43] basic predictor test --- certexchange/polling/predictor_test.go | 60 ++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 certexchange/polling/predictor_test.go diff --git a/certexchange/polling/predictor_test.go b/certexchange/polling/predictor_test.go new file mode 100644 index 00000000..1c37a1e2 --- /dev/null +++ b/certexchange/polling/predictor_test.go @@ -0,0 +1,60 @@ +package polling + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPredictor(t *testing.T) { + p := newPredictor(time.Second, 30*time.Second, 120*time.Second) + + // Progress of 1 shouldn't update anything. + require.Equal(t, 30*time.Second, p.update(1)) + require.Equal(t, 30*time.Second, p.update(1)) + + // Progress of 0 should predict the same interval, then twice that, etc. + require.Equal(t, 30*time.Second, p.update(0)) + require.Equal(t, 60*time.Second, p.update(0)) + + // After that, the intervalA should increase slightly. + intervalA := p.update(1) + require.Less(t, 30*time.Second, intervalA) + require.Greater(t, 40*time.Second, intervalA) + + // If the interval is too large, it should decrease, but not by as much because we're + // switching direction. + intervalB := p.update(2) + require.Less(t, 30*time.Second, intervalB) + require.Greater(t, intervalA, intervalB) + + // It should keep getting smaller. + intervalC := p.update(2) + require.Greater(t, intervalB, intervalC) + + // Until we stabilize. + require.Equal(t, intervalC, p.update(1)) + + // We should always stay above the minimum. + for i := 0; i < 100; i++ { + require.LessOrEqual(t, time.Second, p.update(2)) + } + require.Equal(t, time.Second, p.update(1)) + + // And below the maximum (unless backing off). + for i := 0; i < 100; i++ { + require.GreaterOrEqual(t, 120*time.Second, p.update(0)) + require.GreaterOrEqual(t, 120*time.Second, p.update(1)) + } + require.Equal(t, 120*time.Second, p.update(1)) + + // But backoff should go much higher. 
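+	// (Each consecutive miss doubles the previous backoff, capped at
+	// maxBackoffMultiplier times the maximum interval, i.e. 20 minutes here.)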
+ for i := 0; i < 100; i++ { + require.GreaterOrEqual(t, 10*120*time.Second, p.update(0)) + } + require.Equal(t, 10*120*time.Second, p.update(0)) + + // And revert to the old time when done. + require.Equal(t, 120*time.Second, p.update(1)) +} From 5a3dbe04986f15fba9e271376bedf9c24fab38df Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jun 2024 20:11:31 -0700 Subject: [PATCH 10/43] test and improve predictor convergence --- certexchange/polling/predictor.go | 24 ++++++++++++--------- certexchange/polling/predictor_test.go | 30 ++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/certexchange/polling/predictor.go b/certexchange/polling/predictor.go index 60bcfd1d..a24935fc 100644 --- a/certexchange/polling/predictor.go +++ b/certexchange/polling/predictor.go @@ -4,10 +4,7 @@ import ( "time" ) -const ( - maxBackoffMultiplier = 10 - minExploreDistance = 100 * time.Millisecond -) +const maxBackoffMultiplier = 10 func newPredictor(minInterval, defaultInterval, maxInterval time.Duration) *predictor { return &predictor{ @@ -49,17 +46,24 @@ func (p *predictor) update(progress uint64) time.Duration { // If we've made too much progress (interval too long) or made no progress (interval // too short), explore to find the right interval. - // We halve the explore distance when we switch directions and double it whenever we - // need to keep moving in the same direction repeatedly. if p.wasIncreasing == (progress > 1) { - p.exploreDistance /= 2 - } else { + // We switched directions which means we're circling the target, shrink the + // explore distance. + p.exploreDistance /= 3 + } else if progress <= 2 { + // We repeatedly made no progress, or slightly too much progress. Double the + // explore distance. p.exploreDistance *= 2 + } else { + // We far away from our target and aren't polling often enough. Reset to a + // sane estimate. + p.interval /= time.Duration(progress) + p.exploreDistance = p.interval / 2 } // Make sure the explore distance doesn't get too short/long. - if p.exploreDistance < minExploreDistance { - p.exploreDistance = minExploreDistance + if p.exploreDistance < p.minInterval/100 { + p.exploreDistance = p.minInterval / 100 } else if p.exploreDistance > p.maxInterval/2 { p.exploreDistance = p.maxInterval / 2 } diff --git a/certexchange/polling/predictor_test.go b/certexchange/polling/predictor_test.go index 1c37a1e2..21ba4cd0 100644 --- a/certexchange/polling/predictor_test.go +++ b/certexchange/polling/predictor_test.go @@ -1,9 +1,11 @@ package polling import ( + "math/rand" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -58,3 +60,31 @@ func TestPredictor(t *testing.T) { // And revert to the old time when done. 
require.Equal(t, 120*time.Second, p.update(1)) } + +func TestPredictorConverges(t *testing.T) { + const minSeconds = 1 + const maxSeconds = 120 + p := newPredictor(minSeconds*time.Second, 30*time.Second, maxSeconds*time.Second) + + updatesSeen := uint64(0) + eventInterval := 23 * time.Second + currentTime := time.Duration(0) + + converge := func(n int) time.Duration { + for i := 0; i < n; i++ { + newUpdatesSeen := uint64(currentTime / eventInterval) + currentTime += p.update(newUpdatesSeen - updatesSeen) + updatesSeen = newUpdatesSeen + } + return p.update(1) + } + + r := rand.New(rand.NewSource(0xdeadbeef)) + numbers := r.Perm(maxSeconds - minSeconds) + for _, n := range numbers { + eventInterval = time.Duration(n+minSeconds) * time.Second + result := converge(300) + assert.InEpsilon(t, eventInterval, result, 0.05, "actual %s, expected %s", result, eventInterval) + } + +} From e9a2f260a03ad6d442fd1eb1b9719bf2b66958c2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 28 Jun 2024 10:36:53 -0700 Subject: [PATCH 11/43] improve convergence test --- certexchange/polling/predictor_test.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/certexchange/polling/predictor_test.go b/certexchange/polling/predictor_test.go index 21ba4cd0..a54adc5c 100644 --- a/certexchange/polling/predictor_test.go +++ b/certexchange/polling/predictor_test.go @@ -66,25 +66,30 @@ func TestPredictorConverges(t *testing.T) { const maxSeconds = 120 p := newPredictor(minSeconds*time.Second, 30*time.Second, maxSeconds*time.Second) - updatesSeen := uint64(0) - eventInterval := 23 * time.Second - currentTime := time.Duration(0) - - converge := func(n int) time.Duration { + converge := func(interval time.Duration, n int) (time.Duration, time.Duration, int) { + currentTime := time.Duration(0) + updatesSeen := uint64(0) for i := 0; i < n; i++ { - newUpdatesSeen := uint64(currentTime / eventInterval) + newUpdatesSeen := uint64(currentTime / interval) currentTime += p.update(newUpdatesSeen - updatesSeen) updatesSeen = newUpdatesSeen } - return p.update(1) + return p.update(1), currentTime, int(currentTime / interval) + } + + // Converges from 30s -> 5s very quickly. 
+ { + res, ellapsed, count := converge(5*time.Second, 10) + assert.InDelta(t, 5*time.Second, res, float64(1*time.Second)) + assert.Less(t, ellapsed, 3*time.Minute) + assert.Less(t, count, 30) } r := rand.New(rand.NewSource(0xdeadbeef)) numbers := r.Perm(maxSeconds - minSeconds) for _, n := range numbers { - eventInterval = time.Duration(n+minSeconds) * time.Second - result := converge(300) + eventInterval := time.Duration(n+minSeconds) * time.Second + result, _, _ := converge(eventInterval, 300) assert.InEpsilon(t, eventInterval, result, 0.05, "actual %s, expected %s", result, eventInterval) } - } From 1b159ad4a8df9d709e66d342e2328f03903bf48b Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 28 Jun 2024 11:05:10 -0700 Subject: [PATCH 12/43] test hit/miss tracking --- certexchange/polling/peerTracker.go | 4 +- certexchange/polling/peerTracker_test.go | 87 ++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 certexchange/polling/peerTracker_test.go diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index c7492436..c8fa7d19 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -127,7 +127,7 @@ func (r *peerRecord) recordMiss() { } } -// Return the hit rate and the +// Return the hit rate a number between 0-10 indicating how "full" our window is. func (r *peerRecord) hitRate() (float64, int) { total := r.hits + r.misses // set the default rate such that we we ask `defaultRequests` peers by default. @@ -135,7 +135,7 @@ func (r *peerRecord) hitRate() (float64, int) { if total > 0 { rate = float64(r.hits) / float64(total) } - return rate, total + return rate, min(total, 10) } diff --git a/certexchange/polling/peerTracker_test.go b/certexchange/polling/peerTracker_test.go new file mode 100644 index 00000000..320ffb53 --- /dev/null +++ b/certexchange/polling/peerTracker_test.go @@ -0,0 +1,87 @@ +package polling + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPeerRecordHitMiss(t *testing.T) { + r := new(peerRecord) + + { + hitRate, count := r.hitRate() + // should initialize to some "assumed" positive value. + require.Greater(t, hitRate, 0.0) + require.Equal(t, 0, count) + } + + // 3 hits + for i := 1; i <= 3; i++ { + r.recordHit() + hitRate, count := r.hitRate() + require.Equal(t, 1.0, hitRate) + require.Equal(t, i, count) + } + + // 3 misses + for i := 4; i <= 6; i++ { + r.recordMiss() + hitRate, count := r.hitRate() + require.Less(t, hitRate, 1.0) + require.Equal(t, i, count) + } + + // Should be 50/50. + { + hitRate, count := r.hitRate() + require.Equal(t, 6, count) + require.Equal(t, 0.5, hitRate) + } + + // 10 hits + for i := 0; i < 7; i++ { + r.recordHit() + } + + // Still shouldn't be 100% hit, but should be "full" + { + hitRate, count := r.hitRate() + require.Less(t, hitRate, 1.0) + require.Equal(t, 10, count) + } + + // 3 more hits + for i := 0; i <= 3; i++ { + r.recordHit() + } + + // Should now be 100% hit (canceling out the misses). 
+ { + hitRate, count := r.hitRate() + require.Equal(t, hitRate, 1.0) + require.Equal(t, 10, count) + } + + // 10 misses should bring us back to 50/50 + + for i := 0; i < 10; i++ { + r.recordMiss() + } + + { + hitRate, _ := r.hitRate() + require.Equal(t, hitRate, 0.5) + } + + // Another 10 should bring us to 0.0 + + for i := 0; i < 10; i++ { + r.recordMiss() + } + + { + hitRate, _ := r.hitRate() + require.Equal(t, hitRate, 0.0) + } +} From 665d845bfc6c7e3ca321d2241108d9240030a670 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 28 Jun 2024 15:14:43 -0700 Subject: [PATCH 13/43] improve initialization --- certexchange/polling/discovery.go | 83 ++++++++++++++++++ certexchange/polling/peerTracker.go | 6 ++ certexchange/polling/subscriber.go | 125 ++++++++-------------------- 3 files changed, 124 insertions(+), 90 deletions(-) create mode 100644 certexchange/polling/discovery.go diff --git a/certexchange/polling/discovery.go b/certexchange/polling/discovery.go new file mode 100644 index 00000000..e639ffe8 --- /dev/null +++ b/certexchange/polling/discovery.go @@ -0,0 +1,83 @@ +package polling + +import ( + "context" + "slices" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/filecoin-project/go-f3/certexchange" + "github.com/filecoin-project/go-f3/gpbft" +) + +func discoverPeers(ctx context.Context, h host.Host, nn gpbft.NetworkName) (<-chan peer.ID, error) { + out := make(chan peer.ID, 256) + discoveryEvents, err := h.EventBus().Subscribe([]any{ + new(event.EvtPeerIdentificationCompleted), + new(event.EvtPeerProtocolsUpdated), + }) + if err != nil { + return nil, err + } + + targetProtocol := certexchange.FetchProtocolName(nn) + + // record existing peers. +fillInitialPeers: + for _, p := range h.Network().Peers() { + if proto, err := h.Peerstore().FirstSupportedProtocol(p, targetProtocol); err == nil && proto == targetProtocol { + select { + case out <- p: + default: + // Don't block because we've subscribed to libp2p events. + break fillInitialPeers + } + } + } + + // Then start listening for new peers + go func() { + defer close(out) + defer discoveryEvents.Close() + + for { + var ( + evt any + ok bool + ) + select { + case evt, ok = <-discoveryEvents.Out(): + case <-ctx.Done(): + } + if !ok { + return + } + + var protos []protocol.ID + var peer peer.ID + switch e := evt.(type) { + case *event.EvtPeerIdentificationCompleted: + protos = e.Protocols + peer = e.Peer + case *event.EvtPeerProtocolsUpdated: + protos = e.Added + peer = e.Peer + default: + continue + } + if slices.Contains(protos, targetProtocol) { + // If the channel is full, ignore newly discovered peers. We + // likely have enough anyways and we'll drain the channel + // eventually. + select { + case out <- peer: + default: + } + } + } + }() + return out, nil +} diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index c8fa7d19..b2497e10 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -52,6 +52,12 @@ type backoffRecord struct { delayUntil int } +func newPeerTracker() *peerTracker { + return &peerTracker{ + peers: make(map[peer.ID]*peerRecord), + } +} + type peerTracker struct { // TODO: garbage collect this. 
peers map[peer.ID]*peerRecord diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index 7eaa1cde..484d7d80 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -3,13 +3,10 @@ package polling import ( "context" "fmt" - "slices" "sync" "time" - "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" "github.com/filecoin-project/go-f3/certexchange" "github.com/filecoin-project/go-f3/certstore" @@ -28,33 +25,45 @@ type Subscriber struct { MaximumPollInterval time.Duration MinimumPollInterval time.Duration - peerTracker peerTracker + peerTracker *peerTracker + poller *Poller + discoverCh <-chan peer.ID - wg sync.WaitGroup - - ctx context.Context + wg sync.WaitGroup stop context.CancelFunc } func (s *Subscriber) Start() error { - s.wg.Add(1) - s.ctx, s.stop = context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + s.stop = cancel - discoveredPeers, err := s.libp2pDiscover(s.ctx) + var err error + + s.peerTracker = newPeerTracker() + s.poller, err = NewPoller(ctx, &s.Client, s.Store, s.SignatureVerifier) if err != nil { return err } - poller, err := NewPoller(s.ctx, &s.Client, s.Store, s.SignatureVerifier) + s.discoverCh, err = discoverPeers(ctx, s.Host, s.NetworkName) if err != nil { return err } + s.wg.Add(1) go func() { - defer s.wg.Done() - defer s.stop() // in case we return early, cancel everything else. + defer func() { + // in case we return early, cancel. + s.stop() + // and wait for discovery to exit. + for range s.discoverCh { + } + + // then we're done + s.wg.Done() + }() - if err := s.run(s.ctx, discoveredPeers, poller); err != nil && s.ctx.Err() != nil { + if err := s.run(ctx); err != nil && ctx.Err() != nil { s.Log.Errorf("polling certificate exchange subscriber exited early: %w", err) } }() @@ -63,7 +72,7 @@ func (s *Subscriber) Start() error { } func (s *Subscriber) Stop() error { - if s.stop == nil { + if s.stop != nil { s.stop() s.wg.Wait() } @@ -71,71 +80,7 @@ func (s *Subscriber) Stop() error { return nil } -// Discover new peers. -func (s *Subscriber) libp2pDiscover(ctx context.Context) (<-chan peer.ID, error) { - out := make(chan peer.ID, 256) - discoveryEvents, err := s.Host.EventBus().Subscribe([]any{ - new(event.EvtPeerIdentificationCompleted), - new(event.EvtPeerProtocolsUpdated), - }) - if err != nil { - return nil, err - } - - targetProtocol := certexchange.FetchProtocolName(s.NetworkName) - - // Mark already connected peers as "seen". - for _, p := range s.Host.Network().Peers() { - if proto, err := s.Host.Peerstore().FirstSupportedProtocol(p, targetProtocol); err == nil && proto == targetProtocol { - s.peerTracker.peerSeen(p) - } - } - - // Then start listening for new peers - s.wg.Add(1) - go func() { - defer s.wg.Done() - defer discoveryEvents.Close() - for { - var ( - evt any - ok bool - ) - select { - case evt, ok = <-discoveryEvents.Out(): - case <-ctx.Done(): - } - if !ok { - return - } - - var protos []protocol.ID - var peer peer.ID - switch e := evt.(type) { - case *event.EvtPeerIdentificationCompleted: - protos = e.Protocols - peer = e.Peer - case *event.EvtPeerProtocolsUpdated: - protos = e.Added - peer = e.Peer - default: - continue - } - if slices.Contains(protos, targetProtocol) { - // If the channel is full, ignore newly discovered peers. We - // likely have enough anyways and we'll drain the channel - // eventually. 
- select { - case out <- peer: - default: - } - } - } - }() - return out, nil -} - -func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, poller *Poller) error { +func (s *Subscriber) run(ctx context.Context) error { timer := time.NewTimer(s.InitialPollInterval) defer timer.Stop() @@ -147,7 +92,7 @@ func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, po for ctx.Err() == nil { select { - case p := <-discoveredPeers: + case p := <-s.discoverCh: s.peerTracker.peerSeen(p) case pollTime := <-timer.C: // First, see if we made progress locally. If we have, update @@ -155,13 +100,13 @@ func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, po // was accurate, we'll keep predicting the same interval and we'll // never make any network requests. If we stop making local // progress, we'll start making network requests again. - progress, err := poller.CatchUp(ctx) + progress, err := s.poller.CatchUp(ctx) if err != nil { return err } // Otherwise, poll the network. if progress == 0 { - progress, err = s.poll(ctx, poller) + progress, err = s.poll(ctx) if err != nil { return err } @@ -177,21 +122,21 @@ func (s *Subscriber) run(ctx context.Context, discoveredPeers <-chan peer.ID, po return ctx.Err() } -func (s *Subscriber) poll(ctx context.Context, poller *Poller) (uint64, error) { +func (s *Subscriber) poll(ctx context.Context) (uint64, error) { var ( misses []peer.ID hits []peer.ID ) - start := poller.NextInstance + start := s.poller.NextInstance for _, peer := range s.peerTracker.suggestPeers() { - oldInstance := poller.NextInstance - res, err := poller.Poll(ctx, peer) + oldInstance := s.poller.NextInstance + res, err := s.poller.Poll(ctx, peer) if err != nil { - return poller.NextInstance - start, err + return s.poller.NextInstance - start, err } // If we manage to advance, all old "hits" are actually misses. - if oldInstance < poller.NextInstance { + if oldInstance < s.poller.NextInstance { misses = append(misses, hits...) hits = hits[:0] } @@ -212,7 +157,7 @@ func (s *Subscriber) poll(ctx context.Context, poller *Poller) (uint64, error) { // If we've made progress, record hits/misses. Otherwise, we just have to assume that we // asked too soon. - progress := poller.NextInstance - start + progress := s.poller.NextInstance - start if progress > 0 { for _, p := range misses { s.peerTracker.recordMiss(p) From ce583dac65c636d622ebef81e8b88c5bc1627d3e Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 28 Jun 2024 16:43:09 -0700 Subject: [PATCH 14/43] test peer tracker --- certexchange/polling/peerTracker.go | 57 ++++++++---- certexchange/polling/peerTracker_test.go | 107 +++++++++++++++++++++++ 2 files changed, 149 insertions(+), 15 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index b2497e10..8ab0a632 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -19,6 +19,8 @@ const ( minRequests = 4 // The maximum number of requests to make, even if all of our peers appear to be unreliable. maxRequests = 32 + // How confident should we be that we've suggested enough peers. 1.125 == 112.5% + targetConfidence = 1.125 ) type peerState int @@ -26,6 +28,7 @@ type peerState int const ( peerEvil peerState = iota - 1 peerInactive + peerDeactivating peerActive ) @@ -95,7 +98,11 @@ func (b *backoffHeap) Less(i int, j int) bool { // Pop implements heap.Interface. 
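// container/heap swaps the element being removed to the end of the backing slice
// before calling Pop, so Pop only needs to detach and return that final element.
// Clearing the vacated slot keeps the slice's backing array from pinning the popped
// record after it has been handed back to the caller.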
func (b *backoffHeap) Pop() any { - return (*b)[len(*b)-1] + s := (*b) + item := s[len(s)-1] + s[len(s)-1] = nil + *b = s[:len(s)-1] + return item } // Push implements heap.Interface. @@ -110,9 +117,21 @@ func (b *backoffHeap) Swap(i int, j int) { // Records a failed request and returns how many rounds we should avoid picking this peer for. func (r *peerRecord) recordFailure() int { + delay := 1 << min(r.sequentialFailures, maxBackoffExponent) + + // failures are misses as well. + if r.misses < hitMissSlidingWindow { + r.misses++ + } else if r.hits > 0 { + r.hits-- + } + r.sequentialFailures++ - r.state = peerInactive - return 1 << min(r.sequentialFailures, maxBackoffExponent) + if r.state == peerActive { + r.state = peerDeactivating + } + + return delay } func (r *peerRecord) recordHit() { @@ -137,7 +156,7 @@ func (r *peerRecord) recordMiss() { func (r *peerRecord) hitRate() (float64, int) { total := r.hits + r.misses // set the default rate such that we we ask `defaultRequests` peers by default. - rate := float64(1) / defaultRequests + rate := targetConfidence / defaultRequests if total > 0 { rate = float64(r.hits) / float64(total) } @@ -177,11 +196,14 @@ func (t *peerTracker) recordHit(p peer.ID) { func (t *peerTracker) makeActive(p peer.ID) { r := t.getOrCreate(p) - if r.state != peerInactive { + switch r.state { + case peerEvil, peerActive: return + case peerInactive: + t.active = append(t.active, p) + case peerDeactivating: } r.state = peerActive - t.active = append(t.active, p) } func (t *peerTracker) peerSeen(p peer.ID) { @@ -196,28 +218,34 @@ func (t *peerTracker) peerSeen(p peer.ID) { // // TODO: Add a multiplier if we're not making progress. func (t *peerTracker) suggestPeers() []peer.ID { - // XXX: this should be a param. - const targetProbability = 1.1 - // Advance the round and move peers from backoff to active, if necessary. - t.round++ for t.backoff.Len() > 0 { - r := t.backoff[0] + r := t.backoff[len(t.backoff)-1] if r.delayUntil > t.round { break } heap.Pop(&t.backoff) t.makeActive(r.peer) } + t.round++ // Sort from best to worst. slices.SortFunc(t.active, func(a, b peer.ID) int { return t.getOrCreate(b).Cmp(t.getOrCreate(a)) }) // Trim off any inactive/evil peers from the end, they'll be sorted last. - for l := len(t.active); l > 0 && t.getOrCreate(t.active[l-1]).state != peerActive; l-- { +trimLoop: + for l := len(t.active); l > 0; l-- { + r := t.getOrCreate(t.active[l-1]) + switch r.state { + case peerActive: + break trimLoop + case peerDeactivating: + r.state = peerInactive + } t.active = t.active[:l] } + var prob float64 var peerCount int for _, p := range t.active { @@ -233,8 +261,7 @@ func (t *peerTracker) suggestPeers() []peer.ID { if peerCount >= maxRequests { break } - // Keep going till we're 110% sure. - if prob >= targetProbability { + if prob >= targetConfidence { break } } @@ -243,7 +270,7 @@ func (t *peerTracker) suggestPeers() []peer.ID { if peerCount == len(t.active) { // We've chosen all peers, nothing else we can do. - } else if prob < targetProbability { + } else if prob < targetConfidence { // If we failed to reach the target probability, choose randomly from the remaining // peers. chosen = append(chosen, choose(t.active[peerCount:], maxRequests-peerCount)...) 
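For orientation: with no history a peer's assumed hit rate is targetConfidence/defaultRequests = 1.125/8 ≈ 0.14, so, assuming suggestPeers sums per-peer hit rates into `prob` as the surrounding comments suggest, it takes exactly eight unknown peers to cross the confidence threshold, while peers with a proven track record get there sooner. A rough sketch of how one poll round drives the tracker (simplified; `tracker` and `poller` stand in for the subscriber's peerTracker and Poller, and the real loop in subscriber.go also reclassifies earlier hits as misses once a later peer advances the head):

	for _, p := range tracker.suggestPeers() {
		res, err := poller.Poll(ctx, p)
		if err != nil {
			return err
		}
		switch res {
		case PollHit:
			tracker.recordHit(p)
		case PollMiss:
			tracker.recordMiss(p)
		case PollFailed:
			tracker.recordFailure(p)
		}
	}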
diff --git a/certexchange/polling/peerTracker_test.go b/certexchange/polling/peerTracker_test.go index 320ffb53..343bba38 100644 --- a/certexchange/polling/peerTracker_test.go +++ b/certexchange/polling/peerTracker_test.go @@ -3,6 +3,8 @@ package polling import ( "testing" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/test" "github.com/stretchr/testify/require" ) @@ -85,3 +87,108 @@ func TestPeerRecordHitMiss(t *testing.T) { require.Equal(t, hitRate, 0.0) } } + +func TestPeerRecordExponentialBackoff(t *testing.T) { + r := new(peerRecord) + require.Equal(t, 1, r.recordFailure()) + require.Equal(t, 2, r.recordFailure()) + require.Equal(t, 4, r.recordFailure()) + + // clears backoff. + r.recordHit() + require.Equal(t, 1, r.recordFailure()) + + // backoff stops eventually + for i := 0; i < 100; i++ { + r.recordFailure() + } + require.Equal(t, 1< Date: Sun, 30 Jun 2024 18:09:16 -0700 Subject: [PATCH 15/43] fix include power-table bug --- certexchange/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/certexchange/server.go b/certexchange/server.go index 483f0eb6..4f92fe56 100644 --- a/certexchange/server.go +++ b/certexchange/server.go @@ -69,7 +69,7 @@ func (s *Server) handleRequest(ctx context.Context, stream network.Stream) (_err resp.PendingInstance = latest.GPBFTInstance + 1 } - if resp.PendingInstance <= req.FirstInstance { + if resp.PendingInstance >= req.FirstInstance && req.IncludePowerTable { pt, err := s.Store.GetPowerTable(ctx, req.FirstInstance) if err != nil { s.Log.Errorf("failed to load power table: %w", err) From 9d60913a1db3db58655f59512d61563482d59942 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jun 2024 18:14:45 -0700 Subject: [PATCH 16/43] remove special-casing for zero limit --- certexchange/client.go | 2 +- certexchange/protocol.go | 5 +++++ certexchange/server.go | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/certexchange/client.go b/certexchange/client.go index 88a528c9..093893a0 100644 --- a/certexchange/client.go +++ b/certexchange/client.go @@ -114,7 +114,7 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res } }() defer close(ch) - for i := uint64(0); request.Limit == 0 || i < request.Limit; i++ { + for i := uint64(0); i < request.Limit; i++ { cert := new(certs.FinalityCertificate) // We'll read at most 1MiB per certificate. They generally shouldn't be that diff --git a/certexchange/protocol.go b/certexchange/protocol.go index 8f91f972..bca02eeb 100644 --- a/certexchange/protocol.go +++ b/certexchange/protocol.go @@ -1,6 +1,8 @@ package certexchange import ( + "math" + "github.com/filecoin-project/go-f3/gpbft" "github.com/libp2p/go-libp2p/core/protocol" ) @@ -9,6 +11,9 @@ func FetchProtocolName(nn gpbft.NetworkName) protocol.ID { return protocol.ID("/f3/certexch/get/1/" + string(nn)) } +// Request unlimited certificates. +const NoLimit uint64 = math.MaxUint64 + type Request struct { // First instance to fetch. 
FirstInstance uint64 diff --git a/certexchange/server.go b/certexchange/server.go index 4f92fe56..78d0670c 100644 --- a/certexchange/server.go +++ b/certexchange/server.go @@ -61,7 +61,7 @@ func (s *Server) handleRequest(ctx context.Context, stream network.Stream) (_err } limit := req.Limit - if limit == 0 || limit > maxResponseLen { + if limit > maxResponseLen { limit = maxResponseLen } var resp ResponseHeader From 81427cfcb229586c3231fbd304b74bb4cf2d54ab Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jun 2024 18:20:43 -0700 Subject: [PATCH 17/43] basic client/server test --- certexchange/protocol_test.go | 153 ++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 certexchange/protocol_test.go diff --git a/certexchange/protocol_test.go b/certexchange/protocol_test.go new file mode 100644 index 00000000..c4be39fb --- /dev/null +++ b/certexchange/protocol_test.go @@ -0,0 +1,153 @@ +package certexchange_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-f3/certexchange" + "github.com/filecoin-project/go-f3/certs" + "github.com/filecoin-project/go-f3/certstore" + "github.com/filecoin-project/go-f3/gpbft" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + logging "github.com/ipfs/go-log/v2" + mocknetwork "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" +) + +const testNetworkName gpbft.NetworkName = "testnet" + +var log = logging.Logger("certexchange-test") + +func testPowerTable(entries int64) (gpbft.PowerEntries, gpbft.CID) { + powerTable := make(gpbft.PowerEntries, entries) + + for i := range powerTable { + powerTable[i] = gpbft.PowerEntry{ + ID: gpbft.ActorID(i + 1), + Power: gpbft.NewStoragePower(int64(len(powerTable)*2 - i/2)), + PubKey: []byte("fake key"), + } + } + k, err := certs.MakePowerTableCID(powerTable) + if err != nil { + panic(err) + } + return powerTable, k +} + +func TestClientServer(t *testing.T) { + mocknet := mocknetwork.New() + h1, err := mocknet.GenPeer() + require.NoError(t, err) + h2, err := mocknet.GenPeer() + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, mocknet.LinkAll()) + + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + pt, pcid := testPowerTable(10) + supp := gpbft.SupplementalData{PowerTable: pcid} + + cs, err := certstore.CreateStore(ctx, ds, 0, pt) + require.NoError(t, err) + + cert := &certs.FinalityCertificate{GPBFTInstance: 0, SupplementalData: supp} + err = cs.Put(ctx, cert) + require.NoError(t, err) + + cert = &certs.FinalityCertificate{GPBFTInstance: 1, SupplementalData: supp} + err = cs.Put(ctx, cert) + require.NoError(t, err) + + server := certexchange.Server{ + NetworkName: testNetworkName, + Host: h1, + Store: cs, + Log: log, + } + + client := certexchange.Client{ + Host: h2, + NetworkName: testNetworkName, + Log: log, + } + + require.NoError(t, server.Start()) + defer server.Stop() + + require.NoError(t, mocknet.ConnectAllButSelf()) + + // No certificates, but get the power table. + { + head, certs, err := client.Request(ctx, h1.ID(), &certexchange.Request{ + FirstInstance: 0, + Limit: 0, + IncludePowerTable: true, + }) + require.NoError(t, err) + require.EqualValues(t, 2, head.PendingInstance) + require.EqualValues(t, pt, head.PowerTable) + require.Empty(t, certs) // limit == 0 + } + + // All certificates and no power table. 
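+ // Note: the certificate channel is buffered and is closed by the client once the
+ // server's stream ends or Limit certificates have arrived, so it is safe to drain
+ // it with range or to assert that it is empty, as these tests do.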
+ { + head, certs, err := client.Request(ctx, h1.ID(), &certexchange.Request{ + FirstInstance: 0, + Limit: certexchange.NoLimit, + IncludePowerTable: false, + }) + + require.NoError(t, err) + require.EqualValues(t, 2, head.PendingInstance) + require.Nil(t, head.PowerTable) + + expectInstance := uint64(0) + for c := range certs { + require.Equal(t, expectInstance, c.GPBFTInstance) + expectInstance++ + } + require.Equal(t, expectInstance, head.PendingInstance) + } + + // Just one certificate + { + _, certs, err := client.Request(ctx, h1.ID(), &certexchange.Request{ + FirstInstance: 0, + Limit: 1, + }) + require.NoError(t, err) + + require.Equal(t, uint64(0), (<-certs).GPBFTInstance) + require.Empty(t, certs) + } + + // Should be able to get the power table beyond the last instance. + { + head, certs, err := client.Request(ctx, h1.ID(), &certexchange.Request{ + FirstInstance: 2, + Limit: certexchange.NoLimit, + IncludePowerTable: true, + }) + require.NoError(t, err) + require.EqualValues(t, pt, head.PowerTable) + require.Empty(t, certs) + } + + // Should get nothing beyond that + { + head, certs, err := client.Request(ctx, h1.ID(), &certexchange.Request{ + FirstInstance: 3, + Limit: certexchange.NoLimit, + IncludePowerTable: true, + }) + require.NoError(t, err) + require.Nil(t, head.PowerTable) + require.Empty(t, certs) + } +} From 0aa7d56703dc6abea1785bade026afa8ce237ff3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jun 2024 18:29:45 -0700 Subject: [PATCH 18/43] don't return instances beyond pending, even if we have them --- certexchange/server.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/certexchange/server.go b/certexchange/server.go index 78d0670c..e9b22d6f 100644 --- a/certexchange/server.go +++ b/certexchange/server.go @@ -84,7 +84,15 @@ func (s *Server) handleRequest(ctx context.Context, stream network.Stream) (_err } if resp.PendingInstance > req.FirstInstance { - certs, err := s.Store.GetRange(ctx, req.FirstInstance, req.FirstInstance+limit) + // Only try to return up-to but not including the pending instance we just told the + // client about. Otherwise we could return instances _beyond_ that which is + // inconsistent and confusing. + end := req.FirstInstance + limit + if end >= resp.PendingInstance { + end = resp.PendingInstance - 1 + } + + certs, err := s.Store.GetRange(ctx, req.FirstInstance, end) if err == nil || errors.Is(err, certstore.ErrCertNotFound) { for i := range certs { if err := certs[i].MarshalCBOR(bw); err != nil { From 84ea97870e83e64058a91bf610080e27836aac6d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jun 2024 18:32:38 -0700 Subject: [PATCH 19/43] two final protocol tests --- certexchange/protocol_test.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/certexchange/protocol_test.go b/certexchange/protocol_test.go index c4be39fb..19461b8e 100644 --- a/certexchange/protocol_test.go +++ b/certexchange/protocol_test.go @@ -150,4 +150,29 @@ func TestClientServer(t *testing.T) { require.Nil(t, head.PowerTable) require.Empty(t, certs) } + + // Until we've added a new certificate. 
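+ // The server only includes a power table when the requested FirstInstance is at or
+ // below its pending instance (the PendingInstance >= FirstInstance check added
+ // above), so the instance-3 request stays empty until certificate 2 is stored.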
+ cert = &certs.FinalityCertificate{GPBFTInstance: 2, SupplementalData: supp} + require.NoError(t, cs.Put(ctx, cert)) + + { + _, certs, err := client.Request(ctx, h1.ID(), &certexchange.Request{ + FirstInstance: 2, + Limit: certexchange.NoLimit, + }) + require.NoError(t, err) + require.Equal(t, uint64(2), (<-certs).GPBFTInstance) + require.Empty(t, certs) + } + + { + head, certs, err := client.Request(ctx, h1.ID(), &certexchange.Request{ + FirstInstance: 3, + Limit: certexchange.NoLimit, + IncludePowerTable: true, + }) + require.NoError(t, err) + require.EqualValues(t, pt, head.PowerTable) + require.Empty(t, certs) + } } From 661145bcbdc4de8a7ea8935225ddddf7feecc79b Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jun 2024 21:24:52 -0700 Subject: [PATCH 20/43] fix request cancellation logic --- certexchange/client.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/certexchange/client.go b/certexchange/client.go index 093893a0..56ae9c92 100644 --- a/certexchange/client.go +++ b/certexchange/client.go @@ -51,7 +51,7 @@ func (c *Client) withDeadline(ctx context.Context) (context.Context, context.Can if c.RequestTimeout > 0 { return context.WithTimeout(ctx, c.RequestTimeout) } - return ctx, func() {} + return context.WithCancel(ctx) } // Request finality certificates from the specified peer. Returned finality certificates start at @@ -65,14 +65,19 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res }() ctx, cancel := c.withDeadline(ctx) - defer cancel() + defer func() { + if cancel != nil { + cancel() + } + }() proto := FetchProtocolName(c.NetworkName) stream, err := c.Host.NewStream(ctx, p, proto) if err != nil { return nil, nil, err } - defer resetOnCancel(ctx, stream)() + // canceled by the `cancel` function returned by withDeadline above. + _ = resetOnCancel(ctx, stream) if deadline, ok := ctx.Deadline(); ok { if err := stream.SetDeadline(deadline); err != nil { @@ -104,6 +109,10 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res return nil, nil, err } + // Copy/replace the cancel func so exiting the request doesn't cancel it. + cancelReq := cancel + cancel = nil + ch := make(chan *certs.FinalityCertificate, 1) // copy this in case the caller decides to re-use the request object... 
request := *req @@ -112,8 +121,9 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res if perr := recover(); perr != nil { c.Log.Errorf("panicked while receiving certificates from peer %s: %v\n%s", p, perr, string(debug.Stack())) } + cancelReq() + close(ch) }() - defer close(ch) for i := uint64(0); i < request.Limit; i++ { cert := new(certs.FinalityCertificate) From 35026c4ecf5f85992167adc8e9515ac97ff49cd0 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jun 2024 21:25:24 -0700 Subject: [PATCH 21/43] poll result stringer --- certexchange/polling/poller.go | 5 +++++ certexchange/polling/pollresult_string.go | 26 +++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 certexchange/polling/pollresult_string.go diff --git a/certexchange/polling/poller.go b/certexchange/polling/poller.go index f979ea4e..a03ee689 100644 --- a/certexchange/polling/poller.go +++ b/certexchange/polling/poller.go @@ -1,3 +1,4 @@ +//go:generate stringer -type=PollResult package polling import ( @@ -48,6 +49,10 @@ const ( PollIllegal ) +func (p PollResult) GoString() string { + return p.String() +} + // CatchUp attempts to advance to the latest instance from the certificate store without making any // network requests. It returns the number of instances we advanced. func (p *Poller) CatchUp(ctx context.Context) (uint64, error) { diff --git a/certexchange/polling/pollresult_string.go b/certexchange/polling/pollresult_string.go new file mode 100644 index 00000000..b7e18e00 --- /dev/null +++ b/certexchange/polling/pollresult_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=PollResult"; DO NOT EDIT. + +package polling + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[PollMiss-0] + _ = x[PollHit-1] + _ = x[PollFailed-2] + _ = x[PollIllegal-3] +} + +const _PollResult_name = "PollMissPollHitPollFailedPollIllegal" + +var _PollResult_index = [...]uint8{0, 8, 15, 25, 36} + +func (i PollResult) String() string { + if i < 0 || i >= PollResult(len(_PollResult_index)-1) { + return "PollResult(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PollResult_name[_PollResult_index[i]:_PollResult_index[i+1]] +} From 44cd17c7115f6d98c69b4b3b2220b783e6bee551 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jun 2024 21:31:30 -0700 Subject: [PATCH 22/43] test poller --- certexchange/polling/common_test.go | 11 ++ certexchange/polling/poller_test.go | 198 ++++++++++++++++++++++++++++ certs/certs_test.go | 78 +---------- sim/justification.go | 86 ++++++++++++ 4 files changed, 299 insertions(+), 74 deletions(-) create mode 100644 certexchange/polling/common_test.go create mode 100644 certexchange/polling/poller_test.go create mode 100644 sim/justification.go diff --git a/certexchange/polling/common_test.go b/certexchange/polling/common_test.go new file mode 100644 index 00000000..ce6f9e47 --- /dev/null +++ b/certexchange/polling/common_test.go @@ -0,0 +1,11 @@ +package polling_test + +import ( + "github.com/filecoin-project/go-f3/gpbft" + + logging "github.com/ipfs/go-log/v2" +) + +const testNetworkName gpbft.NetworkName = "testnet" + +var log = logging.Logger("certexchange-poller-test") diff --git a/certexchange/polling/poller_test.go b/certexchange/polling/poller_test.go new file mode 100644 index 00000000..90001e23 --- /dev/null +++ b/certexchange/polling/poller_test.go @@ -0,0 +1,198 @@ +package polling_test + +import ( + "context" + "math/rand" + "testing" + + "github.com/filecoin-project/go-f3/certexchange" + "github.com/filecoin-project/go-f3/certexchange/polling" + "github.com/filecoin-project/go-f3/certs" + "github.com/filecoin-project/go-f3/certstore" + "github.com/filecoin-project/go-f3/gpbft" + "github.com/filecoin-project/go-f3/sim" + "github.com/filecoin-project/go-f3/sim/signing" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + mocknetwork "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" +) + +func randomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries { + powerTable := make(gpbft.PowerEntries, entries) + + for i := range powerTable { + key, _ := backend.GenerateKey() + powerTable[i] = gpbft.PowerEntry{ + ID: gpbft.ActorID(i + 1), + // Power chosen such that: + // - No small subset dominates the power table. + // - Lots of duplicate power values. 
+ Power: gpbft.NewStoragePower(int64(len(powerTable)*2 - i/2)), + PubKey: key, + } + } + return powerTable +} + +func makeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base *gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *certs.FinalityCertificate { + chainLen := rng.Intn(23) + 1 + chain, err := gpbft.NewChain(*base) + require.NoError(t, err) + + for i := 0; i < chainLen; i++ { + chain = chain.Extend(tsg.Sample()) + } + + j, err := sim.MakeJustification(backend, testNetworkName, chain, instance, powerTable, nextPowerTable) + require.NoError(t, err) + + c, err := certs.NewFinalityCertificate(certs.MakePowerTableDiff(powerTable, nextPowerTable), j) + require.NoError(t, err) + + return c +} + +func TestPoller(t *testing.T) { + backend := signing.NewFakeBackend() + rng := rand.New(rand.NewSource(1234)) + + powerTable := randomPowerTable(backend, 10) + tableCid, err := certs.MakePowerTableCID(powerTable) + require.NoError(t, err) + + tsg := sim.NewTipSetGenerator(rng.Uint64()) + base := &gpbft.TipSet{Epoch: 0, Key: tsg.Sample(), PowerTable: tableCid} + + certificates := make([]*certs.FinalityCertificate, 1000) + for i := range certificates { + cert := makeCertificate(t, rng, tsg, backend, base, uint64(i), powerTable, powerTable) + base = cert.ECChain.Head() + certificates[i] = cert + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mocknet := mocknetwork.New() + const serverCount = 100 + + clientHost, err := mocknet.GenPeer() + require.NoError(t, err) + + serverHost, err := mocknet.GenPeer() + require.NoError(t, err) + + require.NoError(t, mocknet.LinkAll()) + + serverDs := ds_sync.MutexWrap(datastore.NewMapDatastore()) + + serverCs, err := certstore.CreateStore(ctx, serverDs, 0, powerTable) + require.NoError(t, err) + + certificatesAdded := 10 + for _, cert := range certificates[:certificatesAdded] { + require.NoError(t, serverCs.Put(ctx, cert)) + } + + server := certexchange.Server{ + NetworkName: testNetworkName, + Host: serverHost, + Store: serverCs, + Log: log, + } + + require.NoError(t, server.Start()) + defer server.Stop() + + clientDs := ds_sync.MutexWrap(datastore.NewMapDatastore()) + clientCs, err := certstore.CreateStore(ctx, clientDs, 0, powerTable) + require.NoError(t, err) + + client := certexchange.Client{ + Host: clientHost, + NetworkName: testNetworkName, + Log: log, + } + + poller, err := polling.NewPoller(ctx, &client, clientCs, backend) + require.NoError(t, err) + + require.NoError(t, mocknet.ConnectAllButSelf()) + + // Client should start with a clean slate. + { + i, err := poller.CatchUp(ctx) + require.NoError(t, err) + require.Zero(t, i) + require.Zero(t, poller.NextInstance) + } + + // Should catch up. Doing this twice should be considered a hit both times because the + // server is at least as caught up as us. + for i := 0; i < 2; i++ { + res, err := poller.Poll(ctx, serverHost.ID()) + require.NoError(t, err) + require.Equal(t, polling.PollHit, res) + require.Equal(t, uint64(certificatesAdded), poller.NextInstance) + } + + // If we put a certificate on the client, we should call it a _miss_ + { + require.NoError(t, clientCs.Put(ctx, certificates[certificatesAdded])) + + res, err := poller.Poll(ctx, serverHost.ID()) + require.NoError(t, err) + require.Equal(t, polling.PollMiss, res) + } + + // Add that cert to the server. + require.NoError(t, serverCs.Put(ctx, certificates[certificatesAdded])) + certificatesAdded++ + + // And now it's a hit! 
+ { + res, err := poller.Poll(ctx, serverHost.ID()) + require.NoError(t, err) + require.Equal(t, polling.PollHit, res) + } + + // Add more than the request maximum (up till the last cert) + for ; certificatesAdded < len(certificates)-1; certificatesAdded++ { + require.NoError(t, serverCs.Put(ctx, certificates[certificatesAdded])) + } + + // We should poll multiple times and completely catch up. + { + res, err := poller.Poll(ctx, serverHost.ID()) + require.NoError(t, err) + require.Equal(t, polling.PollHit, res) + require.Equal(t, uint64(certificatesAdded), poller.NextInstance) + } + + // We catch evil servers! + { + lastCert := certificates[certificatesAdded] + lastCert.Signature = []byte("bad sig") + require.NoError(t, serverCs.Put(ctx, lastCert)) + + res, err := poller.Poll(ctx, serverHost.ID()) + require.NoError(t, err) + require.Equal(t, polling.PollIllegal, res) + + // And we don't store certificates from them! + require.Equal(t, uint64(certificatesAdded), poller.NextInstance) + _, err = clientCs.Get(ctx, lastCert.GPBFTInstance) + require.ErrorIs(t, err, certstore.ErrCertNotFound) + } + + // Stop the server, and make sure we get a failure. + server.Stop() + + { + res, err := poller.Poll(ctx, serverHost.ID()) + require.NoError(t, err) + require.Equal(t, polling.PollFailed, res) + } +} diff --git a/certs/certs_test.go b/certs/certs_test.go index fd608a58..a08a1aae 100644 --- a/certs/certs_test.go +++ b/certs/certs_test.go @@ -1,33 +1,21 @@ package certs_test import ( - "bytes" - "cmp" "fmt" "math/rand" "slices" "sort" "testing" - "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-f3/certs" "github.com/filecoin-project/go-f3/gpbft" "github.com/filecoin-project/go-f3/sim" "github.com/filecoin-project/go-f3/sim/signing" "github.com/stretchr/testify/require" - "golang.org/x/xerrors" ) const networkName = "f3test" -func makePowerTableCID(pt gpbft.PowerEntries) (gpbft.CID, error) { - var buf bytes.Buffer - if err := pt.MarshalCBOR(&buf); err != nil { - return nil, xerrors.Errorf("failed to serialize power table: %w", err) - } - return gpbft.MakeCid(buf.Bytes()), nil -} - func TestNoFinalityCertificates(t *testing.T) { backend := signing.NewFakeBackend() nextInstance, chain, newPowerTable, err := certs.ValidateFinalityCertificates(backend, networkName, nil, 0, nil) @@ -204,7 +192,7 @@ func TestFinalityCertificates(t *testing.T) { powerTable := randomPowerTable(backend, 100) maxPower := int64(len(powerTable) * 2) - tableCid, err := makePowerTableCID(powerTable) + tableCid, err := certs.MakePowerTableCID(powerTable) require.NoError(t, err) rng := rand.New(rand.NewSource(1234)) @@ -252,7 +240,7 @@ func TestBadFinalityCertificates(t *testing.T) { powerTable := randomPowerTable(backend, 100) rng := rand.New(rand.NewSource(1234)) tsg := sim.NewTipSetGenerator(rng.Uint64()) - tableCid, err := makePowerTableCID(powerTable) + tableCid, err := certs.MakePowerTableCID(powerTable) require.NoError(t, err) base := gpbft.TipSet{Epoch: 0, Key: tsg.Sample(), PowerTable: tableCid} @@ -475,10 +463,6 @@ func randomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries func makeJustification(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *gpbft.Justification { chainLen := rng.Intn(23) + 1 - - scaledPowerTable, totalPower, err := powerTable.Scaled() - require.NoError(t, err) - chain, err := gpbft.NewChain(base) require.NoError(t, err) @@ -486,62 +470,8 @@ func 
makeJustification(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, b chain = chain.Extend(tsg.Sample()) } - powerTableCid, err := makePowerTableCID(nextPowerTable) + j, err := sim.MakeJustification(backend, networkName, chain, instance, powerTable, nextPowerTable) require.NoError(t, err) - payload := gpbft.Payload{ - Instance: instance, - Round: 0, - Step: gpbft.DECIDE_PHASE, - SupplementalData: gpbft.SupplementalData{ - PowerTable: powerTableCid, - }, - Value: chain, - } - msg := backend.MarshalPayloadForSigning(networkName, &payload) - signers := rand.Perm(len(powerTable)) - signersBitfield := bitfield.New() - var signingPower uint16 - - type vote struct { - index int - sig []byte - pk gpbft.PubKey - } - - var votes []vote - for _, i := range signers { - pe := powerTable[i] - sig, err := backend.Sign(pe.PubKey, msg) - require.NoError(t, err) - votes = append(votes, vote{ - index: i, - sig: sig, - pk: pe.PubKey, - }) - - signersBitfield.Set(uint64(i)) - signingPower += scaledPowerTable[i] - if gpbft.IsStrongQuorum(signingPower, totalPower) { - break - } - } - slices.SortFunc(votes, func(a, b vote) int { - return cmp.Compare(a.index, b.index) - }) - pks := make([]gpbft.PubKey, len(votes)) - sigs := make([][]byte, len(votes)) - for i, vote := range votes { - pks[i] = vote.pk - sigs[i] = vote.sig - } - - sig, err := backend.Aggregate(pks, sigs) - require.NoError(t, err) - - return &gpbft.Justification{ - Vote: payload, - Signers: signersBitfield, - Signature: sig, - } + return j } diff --git a/sim/justification.go b/sim/justification.go new file mode 100644 index 00000000..0d4ff48f --- /dev/null +++ b/sim/justification.go @@ -0,0 +1,86 @@ +package sim + +import ( + "cmp" + "math/rand" + "slices" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-f3/certs" + "github.com/filecoin-project/go-f3/gpbft" + "github.com/filecoin-project/go-f3/sim/signing" +) + +// Generate a justification from the given power table. This assumes the signing backend can sign for all keys. 
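+// A typical (illustrative) use, mirroring the test helpers that turn the generated
+// justification into a finality certificate:
+//
+//	j, err := MakeJustification(backend, networkName, chain, instance, powerTable, nextPowerTable)
+//	if err != nil { ... }
+//	cert, err := certs.NewFinalityCertificate(certs.MakePowerTableDiff(powerTable, nextPowerTable), j)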
+func MakeJustification(backend signing.Backend, nn gpbft.NetworkName, chain gpbft.ECChain, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) (*gpbft.Justification, error) { + + scaledPowerTable, totalPower, err := powerTable.Scaled() + if err != nil { + return nil, err + } + + powerTableCid, err := certs.MakePowerTableCID(nextPowerTable) + if err != nil { + return nil, err + } + + payload := gpbft.Payload{ + Instance: instance, + Round: 0, + Step: gpbft.DECIDE_PHASE, + SupplementalData: gpbft.SupplementalData{ + PowerTable: powerTableCid, + }, + Value: chain, + } + msg := backend.MarshalPayloadForSigning(nn, &payload) + signers := rand.Perm(len(powerTable)) + signersBitfield := bitfield.New() + var signingPower uint16 + + type vote struct { + index int + sig []byte + pk gpbft.PubKey + } + + var votes []vote + for _, i := range signers { + pe := powerTable[i] + sig, err := backend.Sign(pe.PubKey, msg) + if err != nil { + return nil, err + } + votes = append(votes, vote{ + index: i, + sig: sig, + pk: pe.PubKey, + }) + + signersBitfield.Set(uint64(i)) + signingPower += scaledPowerTable[i] + if gpbft.IsStrongQuorum(signingPower, totalPower) { + break + } + } + slices.SortFunc(votes, func(a, b vote) int { + return cmp.Compare(a.index, b.index) + }) + pks := make([]gpbft.PubKey, len(votes)) + sigs := make([][]byte, len(votes)) + for i, vote := range votes { + pks[i] = vote.pk + sigs[i] = vote.sig + } + + sig, err := backend.Aggregate(pks, sigs) + if err != nil { + return nil, err + } + + return &gpbft.Justification{ + Vote: payload, + Signers: signersBitfield, + Signature: sig, + }, nil +} From 7f0e6a01d5a77a65b13a6ff1ee8e6b090e9ced03 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 1 Jul 2024 16:50:27 -0700 Subject: [PATCH 23/43] fixes and test subscriber --- certexchange/client.go | 36 +++--- certexchange/polling/common_test.go | 63 +++++++++++ certexchange/polling/discovery.go | 4 +- certexchange/polling/peerTracker.go | 36 +++--- certexchange/polling/peerTracker_test.go | 19 ++-- certexchange/polling/poller_test.go | 52 +-------- certexchange/polling/subscriber.go | 9 +- certexchange/polling/subscriber_test.go | 135 +++++++++++++++++++++++ 8 files changed, 252 insertions(+), 102 deletions(-) create mode 100644 certexchange/polling/subscriber_test.go diff --git a/certexchange/client.go b/certexchange/client.go index 56ae9c92..fe664b7a 100644 --- a/certexchange/client.go +++ b/certexchange/client.go @@ -12,7 +12,6 @@ import ( "github.com/filecoin-project/go-f3/certs" "github.com/filecoin-project/go-f3/gpbft" "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" ) @@ -32,21 +31,6 @@ type Client struct { Log f3.Logger } -func resetOnCancel(ctx context.Context, s network.Stream) func() { - errCh := make(chan error, 1) - cancel := context.AfterFunc(ctx, func() { - errCh <- s.Reset() - close(errCh) - }) - return func() { - if cancel() { - _ = s.Reset() - } else { - <-errCh - } - } -} - func (c *Client) withDeadline(ctx context.Context) (context.Context, context.CancelFunc) { if c.RequestTimeout > 0 { return context.WithTimeout(ctx, c.RequestTimeout) @@ -76,8 +60,10 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res if err != nil { return nil, nil, err } - // canceled by the `cancel` function returned by withDeadline above. - _ = resetOnCancel(ctx, stream) + // Reset the stream if the parent context is canceled. 
We never call the returned cancel + // function because we call the cancel function returned by `withDeadline` (which cancels + // the entire context tree). + context.AfterFunc(ctx, func() { _ = stream.Reset() }) if deadline, ok := ctx.Deadline(); ok { if err := stream.SetDeadline(deadline); err != nil { @@ -109,13 +95,13 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res return nil, nil, err } - // Copy/replace the cancel func so exiting the request doesn't cancel it. - cancelReq := cancel - cancel = nil - ch := make(chan *certs.FinalityCertificate, 1) // copy this in case the caller decides to re-use the request object... request := *req + + // Copy/replace the cancel func so exiting the request doesn't cancel it. + cancelReq := cancel + cancel = nil go func() { defer func() { if perr := recover(); perr != nil { @@ -131,7 +117,11 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res // large, but large power deltas could get close. br.N = maxPowerTableSize err := cert.UnmarshalCBOR(br) - if err != nil { + switch err { + case nil: + case io.EOF: + return + default: c.Log.Debugf("failed to unmarshal certificate from peer %s: %w", p, err) return } diff --git a/certexchange/polling/common_test.go b/certexchange/polling/common_test.go index ce6f9e47..e30bb6aa 100644 --- a/certexchange/polling/common_test.go +++ b/certexchange/polling/common_test.go @@ -1,11 +1,74 @@ package polling_test import ( + "math/rand" + "testing" + + "github.com/filecoin-project/go-f3/certs" "github.com/filecoin-project/go-f3/gpbft" + "github.com/filecoin-project/go-f3/sim" + "github.com/filecoin-project/go-f3/sim/signing" logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" ) const testNetworkName gpbft.NetworkName = "testnet" var log = logging.Logger("certexchange-poller-test") + +func init() { + logging.SetLogLevel("certexchange-poller-test", "debug") +} + +func makeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base *gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *certs.FinalityCertificate { + chainLen := rng.Intn(23) + 1 + chain, err := gpbft.NewChain(*base) + require.NoError(t, err) + + for i := 0; i < chainLen; i++ { + chain = chain.Extend(tsg.Sample()) + } + + j, err := sim.MakeJustification(backend, testNetworkName, chain, instance, powerTable, nextPowerTable) + require.NoError(t, err) + + c, err := certs.NewFinalityCertificate(certs.MakePowerTableDiff(powerTable, nextPowerTable), j) + require.NoError(t, err) + + return c +} + +func randomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries { + powerTable := make(gpbft.PowerEntries, entries) + + for i := range powerTable { + key, _ := backend.GenerateKey() + powerTable[i] = gpbft.PowerEntry{ + ID: gpbft.ActorID(i + 1), + // Power chosen such that: + // - No small subset dominates the power table. + // - Lots of duplicate power values. 
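+ //   e.g. with the 10 entries used in these tests the formula below yields
+ //   20, 20, 19, 19, ..., 16, 16, so no single entry holds much more than a
+ //   tenth of the ~180 total power.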
+ Power: gpbft.NewStoragePower(int64(len(powerTable)*2 - i/2)), + PubKey: key, + } + } + return powerTable +} + +func makeCertificates(t *testing.T, rng *rand.Rand, backend signing.Backend) ([]*certs.FinalityCertificate, gpbft.PowerEntries) { + powerTable := randomPowerTable(backend, 10) + tableCid, err := certs.MakePowerTableCID(powerTable) + require.NoError(t, err) + + tsg := sim.NewTipSetGenerator(rng.Uint64()) + base := &gpbft.TipSet{Epoch: 0, Key: tsg.Sample(), PowerTable: tableCid} + + certificates := make([]*certs.FinalityCertificate, 1000) + for i := range certificates { + cert := makeCertificate(t, rng, tsg, backend, base, uint64(i), powerTable, powerTable) + base = cert.ECChain.Head() + certificates[i] = cert + } + return certificates, powerTable +} diff --git a/certexchange/polling/discovery.go b/certexchange/polling/discovery.go index e639ffe8..de73d200 100644 --- a/certexchange/polling/discovery.go +++ b/certexchange/polling/discovery.go @@ -59,10 +59,10 @@ fillInitialPeers: var protos []protocol.ID var peer peer.ID switch e := evt.(type) { - case *event.EvtPeerIdentificationCompleted: + case event.EvtPeerIdentificationCompleted: protos = e.Protocols peer = e.Peer - case *event.EvtPeerProtocolsUpdated: + case event.EvtPeerProtocolsUpdated: protos = e.Added peer = e.Peer default: diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index 8ab0a632..4e83a1de 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -10,7 +10,7 @@ import ( ) const ( - hitMissSlidingWindow = 10 + hitMissSlidingWindow = 3 maxBackoffExponent = 8 // The default number of requests to make. defaultRequests = 8 @@ -36,7 +36,7 @@ const ( type peerRecord struct { sequentialFailures int - // Sliding windows of hits/misses (0-10 each). If either would exceed 10, we subtract 1 from + // Sliding windows of hits/misses (0-3 each). If either would exceed 3, we subtract 1 from // both (where 0 is the floor). // // - We use sliding windows to give more weight to recent hits/misses. @@ -65,9 +65,9 @@ type peerTracker struct { // TODO: garbage collect this. peers map[peer.ID]*peerRecord // TODO: Limit the number of active peers. - active []peer.ID - backoff backoffHeap - round int + active []peer.ID + backoff backoffHeap + lastHitRound, currentRound int } func (r *peerRecord) Cmp(other *peerRecord) int { @@ -160,7 +160,7 @@ func (r *peerRecord) hitRate() (float64, int) { if total > 0 { rate = float64(r.hits) / float64(total) } - return rate, min(total, 10) + return rate, min(total, hitMissSlidingWindow) } @@ -185,12 +185,13 @@ func (t *peerTracker) recordFailure(p peer.ID) { // When we fail to query a peer, backoff that peer. r := &backoffRecord{ peer: p, - delayUntil: t.round + t.getOrCreate(p).recordFailure(), + delayUntil: t.currentRound + t.getOrCreate(p).recordFailure(), } heap.Push(&t.backoff, r) } func (t *peerTracker) recordHit(p peer.ID) { + t.lastHitRound = t.currentRound t.getOrCreate(p).recordHit() } @@ -221,13 +222,13 @@ func (t *peerTracker) suggestPeers() []peer.ID { // Advance the round and move peers from backoff to active, if necessary. for t.backoff.Len() > 0 { r := t.backoff[len(t.backoff)-1] - if r.delayUntil > t.round { + if r.delayUntil > t.currentRound { break } heap.Pop(&t.backoff) t.makeActive(r.peer) } - t.round++ + t.currentRound++ // Sort from best to worst. 
slices.SortFunc(t.active, func(a, b peer.ID) int { @@ -246,6 +247,15 @@ trimLoop: t.active = t.active[:l] } + // Adjust the minimum peer count and probability threshold based on the current distance to + // the last successful round (capped at 8 rounds). + // - We increase the minimum peer threshold exponentially till we hit the max. + // - We increase the target confidence lineally. We still want to try more and more of our + // "good" peers but... as we keep failing, we want to try more and more random peers. + distance := min(8, t.currentRound-t.lastHitRound) + minPeers := min(minRequests<<(distance-1), maxRequests) + threshold := targetConfidence * float64(distance) + var prob float64 var peerCount int for _, p := range t.active { @@ -261,7 +271,7 @@ trimLoop: if peerCount >= maxRequests { break } - if prob >= targetConfidence { + if prob >= threshold { break } } @@ -270,14 +280,14 @@ trimLoop: if peerCount == len(t.active) { // We've chosen all peers, nothing else we can do. - } else if prob < targetConfidence { + } else if prob < threshold { // If we failed to reach the target probability, choose randomly from the remaining // peers. chosen = append(chosen, choose(t.active[peerCount:], maxRequests-peerCount)...) - } else if peerCount < minRequests { + } else if peerCount < minPeers { // If we reached the target probability but didn't reach the number of minimum // requests, pick a few more peers to fill us out. - chosen = append(chosen, choose(t.active[peerCount:], minRequests-peerCount)...) + chosen = append(chosen, choose(t.active[peerCount:], minPeers-peerCount)...) } return chosen diff --git a/certexchange/polling/peerTracker_test.go b/certexchange/polling/peerTracker_test.go index 343bba38..648488c3 100644 --- a/certexchange/polling/peerTracker_test.go +++ b/certexchange/polling/peerTracker_test.go @@ -23,7 +23,7 @@ func TestPeerRecordHitMiss(t *testing.T) { r.recordHit() hitRate, count := r.hitRate() require.Equal(t, 1.0, hitRate) - require.Equal(t, i, count) + require.Equal(t, min(i, hitMissSlidingWindow), count) } // 3 misses @@ -31,26 +31,25 @@ func TestPeerRecordHitMiss(t *testing.T) { r.recordMiss() hitRate, count := r.hitRate() require.Less(t, hitRate, 1.0) - require.Equal(t, i, count) + require.Equal(t, min(i, hitMissSlidingWindow), count) } // Should be 50/50. 
{ hitRate, count := r.hitRate() - require.Equal(t, 6, count) + require.Equal(t, min(6, hitMissSlidingWindow), count) require.Equal(t, 0.5, hitRate) } - // 10 hits - for i := 0; i < 7; i++ { + // 2 hits + for i := 0; i < 2; i++ { r.recordHit() } - // Still shouldn't be 100% hit, but should be "full" { hitRate, count := r.hitRate() require.Less(t, hitRate, 1.0) - require.Equal(t, 10, count) + require.Equal(t, hitMissSlidingWindow, count) } // 3 more hits @@ -62,12 +61,12 @@ func TestPeerRecordHitMiss(t *testing.T) { { hitRate, count := r.hitRate() require.Equal(t, hitRate, 1.0) - require.Equal(t, 10, count) + require.Equal(t, hitMissSlidingWindow, count) } - // 10 misses should bring us back to 50/50 + // should bring us back to 50/50 - for i := 0; i < 10; i++ { + for i := 0; i < hitMissSlidingWindow; i++ { r.recordMiss() } diff --git a/certexchange/polling/poller_test.go b/certexchange/polling/poller_test.go index 90001e23..44e052f8 100644 --- a/certexchange/polling/poller_test.go +++ b/certexchange/polling/poller_test.go @@ -7,10 +7,7 @@ import ( "github.com/filecoin-project/go-f3/certexchange" "github.com/filecoin-project/go-f3/certexchange/polling" - "github.com/filecoin-project/go-f3/certs" "github.com/filecoin-project/go-f3/certstore" - "github.com/filecoin-project/go-f3/gpbft" - "github.com/filecoin-project/go-f3/sim" "github.com/filecoin-project/go-f3/sim/signing" "github.com/ipfs/go-datastore" @@ -19,58 +16,11 @@ import ( "github.com/stretchr/testify/require" ) -func randomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries { - powerTable := make(gpbft.PowerEntries, entries) - - for i := range powerTable { - key, _ := backend.GenerateKey() - powerTable[i] = gpbft.PowerEntry{ - ID: gpbft.ActorID(i + 1), - // Power chosen such that: - // - No small subset dominates the power table. - // - Lots of duplicate power values. 
- Power: gpbft.NewStoragePower(int64(len(powerTable)*2 - i/2)), - PubKey: key, - } - } - return powerTable -} - -func makeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base *gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *certs.FinalityCertificate { - chainLen := rng.Intn(23) + 1 - chain, err := gpbft.NewChain(*base) - require.NoError(t, err) - - for i := 0; i < chainLen; i++ { - chain = chain.Extend(tsg.Sample()) - } - - j, err := sim.MakeJustification(backend, testNetworkName, chain, instance, powerTable, nextPowerTable) - require.NoError(t, err) - - c, err := certs.NewFinalityCertificate(certs.MakePowerTableDiff(powerTable, nextPowerTable), j) - require.NoError(t, err) - - return c -} - func TestPoller(t *testing.T) { backend := signing.NewFakeBackend() rng := rand.New(rand.NewSource(1234)) - powerTable := randomPowerTable(backend, 10) - tableCid, err := certs.MakePowerTableCID(powerTable) - require.NoError(t, err) - - tsg := sim.NewTipSetGenerator(rng.Uint64()) - base := &gpbft.TipSet{Epoch: 0, Key: tsg.Sample(), PowerTable: tableCid} - - certificates := make([]*certs.FinalityCertificate, 1000) - for i := range certificates { - cert := makeCertificate(t, rng, tsg, backend, base, uint64(i), powerTable, powerTable) - base = cert.ECChain.Head() - certificates[i] = cert - } + certificates, powerTable := makeCertificates(t, rng, backend) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index 484d7d80..c6b3e950 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -63,8 +63,8 @@ func (s *Subscriber) Start() error { s.wg.Done() }() - if err := s.run(ctx); err != nil && ctx.Err() != nil { - s.Log.Errorf("polling certificate exchange subscriber exited early: %w", err) + if err := s.run(ctx); err != nil && ctx.Err() == nil { + s.Log.Errorf("polling certificate exchange subscriber exited early: %s", err) } }() @@ -128,13 +128,16 @@ func (s *Subscriber) poll(ctx context.Context) (uint64, error) { hits []peer.ID ) + peers := s.peerTracker.suggestPeers() start := s.poller.NextInstance - for _, peer := range s.peerTracker.suggestPeers() { + s.Log.Debugf("polling %d peers for instance %d", len(peers), s.poller.NextInstance) + for _, peer := range peers { oldInstance := s.poller.NextInstance res, err := s.poller.Poll(ctx, peer) if err != nil { return s.poller.NextInstance - start, err } + s.Log.Debugf("polled %s for instance %d, got %s", peer, s.poller.NextInstance, res) // If we manage to advance, all old "hits" are actually misses. if oldInstance < s.poller.NextInstance { misses = append(misses, hits...) 
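The subscriber test added below exercises the full polling loop end to end. For reference, wiring a subscriber up looks roughly like this (a sketch only; the host, store, verifier and interval values are placeholders):

	sub := polling.Subscriber{
		Client: certexchange.Client{
			Host:        host,
			NetworkName: networkName,
			Log:         log,
		},
		Store:               certStore,
		SignatureVerifier:   verifier,
		InitialPollInterval: 30 * time.Second,
		MinimumPollInterval: time.Second,
		MaximumPollInterval: 2 * time.Minute,
	}
	if err := sub.Start(); err != nil {
		return err
	}
	defer func() { _ = sub.Stop() }()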
diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go new file mode 100644 index 00000000..7742ce52 --- /dev/null +++ b/certexchange/polling/subscriber_test.go @@ -0,0 +1,135 @@ +package polling_test + +import ( + "context" + "math/rand" + "slices" + "testing" + "time" + + "github.com/filecoin-project/go-f3/certexchange" + "github.com/filecoin-project/go-f3/certexchange/polling" + "github.com/filecoin-project/go-f3/certstore" + "github.com/filecoin-project/go-f3/sim/signing" + + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + mocknetwork "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" +) + +func TestSubscriber(t *testing.T) { + backend := signing.NewFakeBackend() + rng := rand.New(rand.NewSource(1234)) + + certificates, powerTable := makeCertificates(t, rng, backend) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mocknet := mocknetwork.New() + const serverCount = 100 + + clientHost, err := mocknet.GenPeer() + require.NoError(t, err) + + servers := make([]*certexchange.Server, 100) + for i := range servers { + h, err := mocknet.GenPeer() + require.NoError(t, err) + + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + cs, err := certstore.CreateStore(ctx, ds, 0, powerTable) + require.NoError(t, err) + + servers[i] = &certexchange.Server{ + NetworkName: testNetworkName, + Host: h, + Store: cs, + Log: log, + } + } + + require.NoError(t, mocknet.LinkAll()) + + for _, server := range servers { + require.NoError(t, server.Start()) + t.Cleanup(func() { server.Stop() }) + } + + clientDs := ds_sync.MutexWrap(datastore.NewMapDatastore()) + clientCs, err := certstore.CreateStore(ctx, clientDs, 0, powerTable) + require.NoError(t, err) + + client := certexchange.Client{ + Host: clientHost, + NetworkName: testNetworkName, + Log: log, + } + + subscriber := polling.Subscriber{ + Client: client, + Store: clientCs, + SignatureVerifier: backend, + MinimumPollInterval: time.Millisecond, + MaximumPollInterval: time.Second, + InitialPollInterval: 100 * time.Millisecond, + } + + require.NoError(t, subscriber.Start()) + + defer subscriber.Stop() + + require.NoError(t, mocknet.ConnectAllButSelf()) + + for _, s := range servers { + require.NoError(t, s.Store.Put(ctx, certificates[0])) + } + + require.Eventually(t, func() bool { + return clientCs.Latest() != nil + }, time.Second, 10*time.Millisecond) + + require.Equal(t, uint64(0), clientCs.Latest().GPBFTInstance) + + // Slowly drop servers from the network + liveServers := slices.Clone(servers) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + // Let the network run fine for a few rounds. + nextInstance := uint64(1) + for ; nextInstance < 10; nextInstance++ { + <-ticker.C + + require.Eventually(t, func() bool { + return clientCs.Latest().GPBFTInstance == nextInstance-1 + }, time.Second, 10*time.Millisecond) + + for _, s := range liveServers { + require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) + } + } + + // Then kill 20% of the network every three instances. + for ; len(liveServers) > 0; nextInstance++ { + <-ticker.C + + require.Eventually(t, func() bool { + return clientCs.Latest().GPBFTInstance == uint64(nextInstance)-1 + }, 10*time.Second, 10*time.Millisecond) + + // Every 4 instances, stop updating 20% of the network. 
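+ // (The shuffle below randomizes which servers go stale; slicing to 8*len/10
+ // keeps 80% of them producing certificates.)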
+ if nextInstance%4 == 0 { + rand.Shuffle(len(liveServers), func(a, b int) { + liveServers[a], liveServers[b] = liveServers[b], liveServers[a] + }) + liveServers = liveServers[:8*len(liveServers)/10] + } + + for _, s := range liveServers { + require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) + } + } + subscriber.Stop() +} From 0d47580a3065535a3f3855842d7061d12875da85 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 1 Jul 2024 16:52:28 -0700 Subject: [PATCH 24/43] lints --- certexchange/polling/poller_test.go | 1 - certexchange/polling/subscriber_test.go | 1 - 2 files changed, 2 deletions(-) diff --git a/certexchange/polling/poller_test.go b/certexchange/polling/poller_test.go index 44e052f8..db8fa2d5 100644 --- a/certexchange/polling/poller_test.go +++ b/certexchange/polling/poller_test.go @@ -26,7 +26,6 @@ func TestPoller(t *testing.T) { defer cancel() mocknet := mocknetwork.New() - const serverCount = 100 clientHost, err := mocknet.GenPeer() require.NoError(t, err) diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go index 7742ce52..21255445 100644 --- a/certexchange/polling/subscriber_test.go +++ b/certexchange/polling/subscriber_test.go @@ -28,7 +28,6 @@ func TestSubscriber(t *testing.T) { defer cancel() mocknet := mocknetwork.New() - const serverCount = 100 clientHost, err := mocknet.GenPeer() require.NoError(t, err) From 4219e2def60714d714196d24423355507c10bcfa Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 1 Jul 2024 17:01:14 -0700 Subject: [PATCH 25/43] remove test logging --- certexchange/polling/common_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/certexchange/polling/common_test.go b/certexchange/polling/common_test.go index e30bb6aa..46e22afd 100644 --- a/certexchange/polling/common_test.go +++ b/certexchange/polling/common_test.go @@ -17,10 +17,6 @@ const testNetworkName gpbft.NetworkName = "testnet" var log = logging.Logger("certexchange-poller-test") -func init() { - logging.SetLogLevel("certexchange-poller-test", "debug") -} - func makeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base *gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *certs.FinalityCertificate { chainLen := rng.Intn(23) + 1 chain, err := gpbft.NewChain(*base) From be373d0b7afbcbe3a598896d14ac410e90f67a4b Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 1 Jul 2024 17:17:00 -0700 Subject: [PATCH 26/43] fix lints --- certexchange/polling/poller_test.go | 4 ++-- certexchange/polling/subscriber_test.go | 5 ++--- certexchange/protocol_test.go | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/certexchange/polling/poller_test.go b/certexchange/polling/poller_test.go index db8fa2d5..6acf8ee1 100644 --- a/certexchange/polling/poller_test.go +++ b/certexchange/polling/poller_test.go @@ -53,7 +53,7 @@ func TestPoller(t *testing.T) { } require.NoError(t, server.Start()) - defer server.Stop() + t.Cleanup(func() { require.NoError(t, server.Stop()) }) clientDs := ds_sync.MutexWrap(datastore.NewMapDatastore()) clientCs, err := certstore.CreateStore(ctx, clientDs, 0, powerTable) @@ -137,7 +137,7 @@ func TestPoller(t *testing.T) { } // Stop the server, and make sure we get a failure. 
- server.Stop() + require.NoError(t, server.Stop()) { res, err := poller.Poll(ctx, serverHost.ID()) diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go index 21255445..cbd1923d 100644 --- a/certexchange/polling/subscriber_test.go +++ b/certexchange/polling/subscriber_test.go @@ -53,7 +53,7 @@ func TestSubscriber(t *testing.T) { for _, server := range servers { require.NoError(t, server.Start()) - t.Cleanup(func() { server.Stop() }) + t.Cleanup(func() { require.NoError(t, server.Stop()) }) } clientDs := ds_sync.MutexWrap(datastore.NewMapDatastore()) @@ -77,7 +77,7 @@ func TestSubscriber(t *testing.T) { require.NoError(t, subscriber.Start()) - defer subscriber.Stop() + t.Cleanup(func() { require.NoError(t, subscriber.Stop()) }) require.NoError(t, mocknet.ConnectAllButSelf()) @@ -130,5 +130,4 @@ func TestSubscriber(t *testing.T) { require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) } } - subscriber.Stop() } diff --git a/certexchange/protocol_test.go b/certexchange/protocol_test.go index 19461b8e..fee9d269 100644 --- a/certexchange/protocol_test.go +++ b/certexchange/protocol_test.go @@ -78,7 +78,7 @@ func TestClientServer(t *testing.T) { } require.NoError(t, server.Start()) - defer server.Stop() + t.Cleanup(func() { require.NoError(t, server.Stop()) }) require.NoError(t, mocknet.ConnectAllButSelf()) From 0a09e4f0f4866f479302366397e23d29ab2297ed Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 1 Jul 2024 17:34:28 -0700 Subject: [PATCH 27/43] fix peer-tracker off-by-one bug and tests --- certexchange/polling/peerTracker.go | 8 +++++++- certexchange/polling/peerTracker_test.go | 5 ++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index 4e83a1de..57e123e3 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -195,6 +195,12 @@ func (t *peerTracker) recordHit(p peer.ID) { t.getOrCreate(p).recordHit() } +// Reset the "last hit round" to the current round, even if there were no hits. +// Used for testing. +func (t *peerTracker) resetLastHitRound() { + t.lastHitRound = t.currentRound +} + func (t *peerTracker) makeActive(p peer.ID) { r := t.getOrCreate(p) switch r.state { @@ -244,7 +250,7 @@ trimLoop: case peerDeactivating: r.state = peerInactive } - t.active = t.active[:l] + t.active = t.active[:l-1] } // Adjust the minimum peer count and probability threshold based on the current distance to diff --git a/certexchange/polling/peerTracker_test.go b/certexchange/polling/peerTracker_test.go index 648488c3..7345750f 100644 --- a/certexchange/polling/peerTracker_test.go +++ b/certexchange/polling/peerTracker_test.go @@ -118,16 +118,19 @@ func TestPeerTracker(t *testing.T) { for _, n := range []int{0, 1, defaultRequests / 2, defaultRequests/2 - 1} { discoverPeers(n) + pt.resetLastHitRound() suggested := pt.suggestPeers() require.ElementsMatch(t, peers, suggested) } // Too many peers discoverPeers(1) + pt.resetLastHitRound() require.Less(t, len(pt.suggestPeers()), len(peers)) // fail a peer and we should pick the other peers now. 
pt.recordMiss(peers[0]) + pt.resetLastHitRound() require.ElementsMatch(t, peers[1:], pt.suggestPeers()) @@ -153,7 +156,7 @@ func TestPeerTracker(t *testing.T) { require.NotContains(t, pt.suggestPeers(), peers[0]) require.NotContains(t, pt.suggestPeers(), peers[0]) require.NotContains(t, pt.suggestPeers(), peers[0]) - require.Equal(t, pt.suggestPeers()[0], peers[0]) + require.Contains(t, pt.suggestPeers(), peers[0]) // Now, give that peer a perfect success rate for i := 0; i < 100; i++ { From a87bfe9aa5cc6ea0c711c6b612dca2eeaeb2473c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 11:03:47 +0200 Subject: [PATCH 28/43] don't require local stringer install --- certexchange/polling/poller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/certexchange/polling/poller.go b/certexchange/polling/poller.go index a03ee689..59d7de54 100644 --- a/certexchange/polling/poller.go +++ b/certexchange/polling/poller.go @@ -1,4 +1,4 @@ -//go:generate stringer -type=PollResult +//go:generate go run golang.org/x/tools/cmd/stringer -type=PollResult package polling import ( From bec6c3d598b2b59f3519b2df7c04e9c94f1657d7 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 11:12:55 +0200 Subject: [PATCH 29/43] remove test-only method --- certexchange/polling/peerTracker.go | 6 ------ certexchange/polling/peerTracker_test.go | 6 +++--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index 57e123e3..33c96522 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -195,12 +195,6 @@ func (t *peerTracker) recordHit(p peer.ID) { t.getOrCreate(p).recordHit() } -// Reset the "last hit round" to the current round, even if there were no hits. -// Used for testing. -func (t *peerTracker) resetLastHitRound() { - t.lastHitRound = t.currentRound -} - func (t *peerTracker) makeActive(p peer.ID) { r := t.getOrCreate(p) switch r.state { diff --git a/certexchange/polling/peerTracker_test.go b/certexchange/polling/peerTracker_test.go index 7345750f..34365ba8 100644 --- a/certexchange/polling/peerTracker_test.go +++ b/certexchange/polling/peerTracker_test.go @@ -118,19 +118,19 @@ func TestPeerTracker(t *testing.T) { for _, n := range []int{0, 1, defaultRequests / 2, defaultRequests/2 - 1} { discoverPeers(n) - pt.resetLastHitRound() + pt.lastHitRound = pt.currentRound suggested := pt.suggestPeers() require.ElementsMatch(t, peers, suggested) } // Too many peers discoverPeers(1) - pt.resetLastHitRound() + pt.lastHitRound = pt.currentRound require.Less(t, len(pt.suggestPeers()), len(peers)) // fail a peer and we should pick the other peers now. 
pt.recordMiss(peers[0]) - pt.resetLastHitRound() + pt.lastHitRound = pt.currentRound require.ElementsMatch(t, peers[1:], pt.suggestPeers()) From 5ca20b85af18d6c862bf1b75280c4457c1972d33 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 11:31:47 +0200 Subject: [PATCH 30/43] use a mock clock --- certexchange/polling/common.go | 7 +++++++ certexchange/polling/common_test.go | 28 +++++++++++++++++-------- certexchange/polling/poller_test.go | 10 ++++----- certexchange/polling/subscriber.go | 4 ++-- certexchange/polling/subscriber_test.go | 22 +++++++++++-------- go.mod | 2 +- 6 files changed, 47 insertions(+), 26 deletions(-) create mode 100644 certexchange/polling/common.go diff --git a/certexchange/polling/common.go b/certexchange/polling/common.go new file mode 100644 index 00000000..f05c922e --- /dev/null +++ b/certexchange/polling/common.go @@ -0,0 +1,7 @@ +package polling + +import ( + "github.com/benbjohnson/clock" +) + +var clk clock.Clock = clock.New() diff --git a/certexchange/polling/common_test.go b/certexchange/polling/common_test.go index 46e22afd..60398e1a 100644 --- a/certexchange/polling/common_test.go +++ b/certexchange/polling/common_test.go @@ -1,4 +1,4 @@ -package polling_test +package polling import ( "math/rand" @@ -9,15 +9,25 @@ import ( "github.com/filecoin-project/go-f3/sim" "github.com/filecoin-project/go-f3/sim/signing" + "github.com/benbjohnson/clock" logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" ) -const testNetworkName gpbft.NetworkName = "testnet" +// The network name used in tests. +const TestNetworkName gpbft.NetworkName = "testnet" -var log = logging.Logger("certexchange-poller-test") +// The logger used in tests. +var TestLog = logging.Logger("certexchange-poller-test") -func makeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base *gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *certs.FinalityCertificate { +// The clock used in tests. Time doesn't pass in tests unless you add time to this clock. 
+var MockClock = clock.NewMock() + +func init() { + clk = MockClock +} + +func MakeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, backend signing.Backend, base *gpbft.TipSet, instance uint64, powerTable, nextPowerTable gpbft.PowerEntries) *certs.FinalityCertificate { chainLen := rng.Intn(23) + 1 chain, err := gpbft.NewChain(*base) require.NoError(t, err) @@ -26,7 +36,7 @@ func makeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, bac chain = chain.Extend(tsg.Sample()) } - j, err := sim.MakeJustification(backend, testNetworkName, chain, instance, powerTable, nextPowerTable) + j, err := sim.MakeJustification(backend, TestNetworkName, chain, instance, powerTable, nextPowerTable) require.NoError(t, err) c, err := certs.NewFinalityCertificate(certs.MakePowerTableDiff(powerTable, nextPowerTable), j) @@ -35,7 +45,7 @@ func makeCertificate(t *testing.T, rng *rand.Rand, tsg *sim.TipSetGenerator, bac return c } -func randomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries { +func RandomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries { powerTable := make(gpbft.PowerEntries, entries) for i := range powerTable { @@ -52,8 +62,8 @@ func randomPowerTable(backend signing.Backend, entries int64) gpbft.PowerEntries return powerTable } -func makeCertificates(t *testing.T, rng *rand.Rand, backend signing.Backend) ([]*certs.FinalityCertificate, gpbft.PowerEntries) { - powerTable := randomPowerTable(backend, 10) +func MakeCertificates(t *testing.T, rng *rand.Rand, backend signing.Backend) ([]*certs.FinalityCertificate, gpbft.PowerEntries) { + powerTable := RandomPowerTable(backend, 10) tableCid, err := certs.MakePowerTableCID(powerTable) require.NoError(t, err) @@ -62,7 +72,7 @@ func makeCertificates(t *testing.T, rng *rand.Rand, backend signing.Backend) ([] certificates := make([]*certs.FinalityCertificate, 1000) for i := range certificates { - cert := makeCertificate(t, rng, tsg, backend, base, uint64(i), powerTable, powerTable) + cert := MakeCertificate(t, rng, tsg, backend, base, uint64(i), powerTable, powerTable) base = cert.ECChain.Head() certificates[i] = cert } diff --git a/certexchange/polling/poller_test.go b/certexchange/polling/poller_test.go index 6acf8ee1..93d6e12a 100644 --- a/certexchange/polling/poller_test.go +++ b/certexchange/polling/poller_test.go @@ -20,7 +20,7 @@ func TestPoller(t *testing.T) { backend := signing.NewFakeBackend() rng := rand.New(rand.NewSource(1234)) - certificates, powerTable := makeCertificates(t, rng, backend) + certificates, powerTable := polling.MakeCertificates(t, rng, backend) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -46,10 +46,10 @@ func TestPoller(t *testing.T) { } server := certexchange.Server{ - NetworkName: testNetworkName, + NetworkName: polling.TestNetworkName, Host: serverHost, Store: serverCs, - Log: log, + Log: polling.TestLog, } require.NoError(t, server.Start()) @@ -61,8 +61,8 @@ func TestPoller(t *testing.T) { client := certexchange.Client{ Host: clientHost, - NetworkName: testNetworkName, - Log: log, + NetworkName: polling.TestNetworkName, + Log: polling.TestLog, } poller, err := polling.NewPoller(ctx, &client, clientCs, backend) diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index c6b3e950..78ae5c0f 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -81,7 +81,7 @@ func (s *Subscriber) Stop() error { } func (s *Subscriber) run(ctx context.Context) error { - timer 
:= time.NewTimer(s.InitialPollInterval) + timer := clk.Timer(s.InitialPollInterval) defer timer.Stop() predictor := newPredictor( @@ -114,7 +114,7 @@ func (s *Subscriber) run(ctx context.Context) error { nextInterval := predictor.update(progress) nextPollTime := pollTime.Add(nextInterval) - timer.Reset(max(time.Until(nextPollTime), 0)) + timer.Reset(max(clk.Until(nextPollTime), 0)) case <-ctx.Done(): return ctx.Err() } diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go index cbd1923d..c52547dc 100644 --- a/certexchange/polling/subscriber_test.go +++ b/certexchange/polling/subscriber_test.go @@ -22,7 +22,7 @@ func TestSubscriber(t *testing.T) { backend := signing.NewFakeBackend() rng := rand.New(rand.NewSource(1234)) - certificates, powerTable := makeCertificates(t, rng, backend) + certificates, powerTable := polling.MakeCertificates(t, rng, backend) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -42,10 +42,10 @@ func TestSubscriber(t *testing.T) { require.NoError(t, err) servers[i] = &certexchange.Server{ - NetworkName: testNetworkName, + NetworkName: polling.TestNetworkName, Host: h, Store: cs, - Log: log, + Log: polling.TestLog, } } @@ -62,8 +62,8 @@ func TestSubscriber(t *testing.T) { client := certexchange.Client{ Host: clientHost, - NetworkName: testNetworkName, - Log: log, + NetworkName: polling.TestNetworkName, + Log: polling.TestLog, } subscriber := polling.Subscriber{ @@ -85,6 +85,8 @@ func TestSubscriber(t *testing.T) { require.NoError(t, s.Store.Put(ctx, certificates[0])) } + polling.MockClock.Add(100 * time.Millisecond) + require.Eventually(t, func() bool { return clientCs.Latest() != nil }, time.Second, 10*time.Millisecond) @@ -99,11 +101,12 @@ func TestSubscriber(t *testing.T) { // Let the network run fine for a few rounds. nextInstance := uint64(1) for ; nextInstance < 10; nextInstance++ { - <-ticker.C + polling.MockClock.Add(100 * time.Millisecond) require.Eventually(t, func() bool { + polling.MockClock.Add(10 * time.Millisecond) return clientCs.Latest().GPBFTInstance == nextInstance-1 - }, time.Second, 10*time.Millisecond) + }, time.Second, time.Millisecond) for _, s := range liveServers { require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) @@ -112,11 +115,12 @@ func TestSubscriber(t *testing.T) { // Then kill 20% of the network every three instances. for ; len(liveServers) > 0; nextInstance++ { - <-ticker.C + polling.MockClock.Add(100 * time.Millisecond) require.Eventually(t, func() bool { + polling.MockClock.Add(10 * time.Millisecond) return clientCs.Latest().GPBFTInstance == uint64(nextInstance)-1 - }, 10*time.Second, 10*time.Millisecond) + }, time.Second, time.Millisecond) // Every 4 instances, stop updating 20% of the network. 
if nextInstance%4 == 0 { diff --git a/go.mod b/go.mod index 212389a3..86ad9adf 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.21 require ( github.com/Kubuxu/go-broadcast v0.0.0-20240621161059-1a8c90734cd6 + github.com/benbjohnson/clock v1.3.5 github.com/drand/kyber v1.3.1 github.com/drand/kyber-bls12381 v0.3.1 github.com/filecoin-project/go-bitfield v0.2.4 @@ -22,7 +23,6 @@ require ( ) require ( - github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect From 89a32faf5df405da55d48677d20f3eec32d8aec8 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 11:50:54 +0200 Subject: [PATCH 31/43] improve timing reliability --- certexchange/polling/subscriber_test.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go index c52547dc..61772823 100644 --- a/certexchange/polling/subscriber_test.go +++ b/certexchange/polling/subscriber_test.go @@ -95,32 +95,35 @@ func TestSubscriber(t *testing.T) { // Slowly drop servers from the network liveServers := slices.Clone(servers) - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() // Let the network run fine for a few rounds. nextInstance := uint64(1) for ; nextInstance < 10; nextInstance++ { + for _, s := range liveServers { + require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) + } + polling.MockClock.Add(100 * time.Millisecond) require.Eventually(t, func() bool { polling.MockClock.Add(10 * time.Millisecond) - return clientCs.Latest().GPBFTInstance == nextInstance-1 - }, time.Second, time.Millisecond) + return clientCs.Latest().GPBFTInstance == nextInstance + }, 10*time.Second, time.Millisecond) - for _, s := range liveServers { - require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) - } } // Then kill 20% of the network every three instances. for ; len(liveServers) > 0; nextInstance++ { + for _, s := range liveServers { + require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) + } + polling.MockClock.Add(100 * time.Millisecond) require.Eventually(t, func() bool { polling.MockClock.Add(10 * time.Millisecond) - return clientCs.Latest().GPBFTInstance == uint64(nextInstance)-1 - }, time.Second, time.Millisecond) + return clientCs.Latest().GPBFTInstance == uint64(nextInstance) + }, 10*time.Second, time.Millisecond) // Every 4 instances, stop updating 20% of the network. 
if nextInstance%4 == 0 { @@ -130,8 +133,5 @@ func TestSubscriber(t *testing.T) { liveServers = liveServers[:8*len(liveServers)/10] } - for _, s := range liveServers { - require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) - } } } From 13e32838782fd324a15f6f3b8f89821ad63fa440 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 12:06:33 +0200 Subject: [PATCH 32/43] improved logging --- certexchange/polling/subscriber.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index 78ae5c0f..6e31ada5 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -114,7 +114,9 @@ func (s *Subscriber) run(ctx context.Context) error { nextInterval := predictor.update(progress) nextPollTime := pollTime.Add(nextInterval) - timer.Reset(max(clk.Until(nextPollTime), 0)) + delay := max(clk.Until(nextPollTime), 0) + s.Log.Infof("predicted interval is %s (waiting %s)", nextInterval, delay) + timer.Reset(delay) case <-ctx.Done(): return ctx.Err() } From a0daf566850711da6c0197bf4c025c72a9b667f1 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 12:06:48 +0200 Subject: [PATCH 33/43] improve test reliability --- certexchange/polling/common_test.go | 2 +- certexchange/polling/subscriber_test.go | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/certexchange/polling/common_test.go b/certexchange/polling/common_test.go index 60398e1a..fec20b6d 100644 --- a/certexchange/polling/common_test.go +++ b/certexchange/polling/common_test.go @@ -18,7 +18,7 @@ import ( const TestNetworkName gpbft.NetworkName = "testnet" // The logger used in tests. -var TestLog = logging.Logger("certexchange-poller-test") +var TestLog = logging.Logger("f3-testing") // The clock used in tests. Time doesn't pass in tests unless you add time to this clock. var MockClock = clock.NewMock() diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go index 61772823..867fef24 100644 --- a/certexchange/polling/subscriber_test.go +++ b/certexchange/polling/subscriber_test.go @@ -103,13 +103,14 @@ func TestSubscriber(t *testing.T) { require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) } - polling.MockClock.Add(100 * time.Millisecond) - + i := 0 require.Eventually(t, func() bool { + i += 10 polling.MockClock.Add(10 * time.Millisecond) return clientCs.Latest().GPBFTInstance == nextInstance }, 10*time.Second, time.Millisecond) + polling.MockClock.Add(time.Duration(max(0, 100-i)) * time.Millisecond) } // Then kill 20% of the network every three instances. @@ -118,13 +119,15 @@ func TestSubscriber(t *testing.T) { require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) } - polling.MockClock.Add(100 * time.Millisecond) - + i := 0 require.Eventually(t, func() bool { + i += 10 polling.MockClock.Add(10 * time.Millisecond) return clientCs.Latest().GPBFTInstance == uint64(nextInstance) }, 10*time.Second, time.Millisecond) + polling.MockClock.Add(time.Duration(max(0, 100-i)) * time.Millisecond) + // Every 4 instances, stop updating 20% of the network. 
if nextInstance%4 == 0 { rand.Shuffle(len(liveServers), func(a, b int) { From 4bdf36e71d62ac001a39ba71d8d2b4f11f2c42ed Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 12:16:01 +0200 Subject: [PATCH 34/43] cleanup --- certexchange/polling/peerTracker.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index 33c96522..30aa842f 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -209,17 +209,13 @@ func (t *peerTracker) makeActive(p peer.ID) { func (t *peerTracker) peerSeen(p peer.ID) { if _, ok := t.peers[p]; !ok { - t.peers[p] = &peerRecord{state: peerActive} + t.peers[p] = &peerRecord{state: peerActive, lastSeen: clk.Now()} t.active = append(t.active, p) } } -// Suggest a number of peers from which to request new certificates based on their historical -// record. -// -// TODO: Add a multiplier if we're not making progress. -func (t *peerTracker) suggestPeers() []peer.ID { - // Advance the round and move peers from backoff to active, if necessary. +// Advance the round and move peers from backoff to active, if necessary. +func (t *peerTracker) advanceRound() { for t.backoff.Len() > 0 { r := t.backoff[len(t.backoff)-1] if r.delayUntil > t.currentRound { @@ -229,6 +225,12 @@ func (t *peerTracker) suggestPeers() []peer.ID { t.makeActive(r.peer) } t.currentRound++ +} + +// Suggest a number of peers from which to request new certificates based on their historical +// record. +func (t *peerTracker) suggestPeers() []peer.ID { + t.advanceRound() // Sort from best to worst. slices.SortFunc(t.active, func(a, b peer.ID) int { From 33e8117273242ecd976f741d1b66711817631ebe Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 13:18:10 +0200 Subject: [PATCH 35/43] implement GC --- certexchange/polling/peerTracker.go | 146 ++++++++++++++++++++--- certexchange/polling/peerTracker_test.go | 23 +++- 2 files changed, 146 insertions(+), 23 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index 30aa842f..b356c8cf 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -5,6 +5,7 @@ import ( "container/heap" "math/rand" "slices" + "time" "github.com/libp2p/go-libp2p/core/peer" ) @@ -21,6 +22,10 @@ const ( maxRequests = 32 // How confident should we be that we've suggested enough peers. 1.125 == 112.5% targetConfidence = 1.125 + + gcHighWater = 10000 // GC the oldest peers when we have 10k + gcLowWater = 1000 // GC down to 1000 peers + gcFailureCuttoff = 3 // Ignore failure counts below N when garbage collecting. ) type peerState int @@ -34,6 +39,9 @@ const ( // TODO: Track latency and connectedness. type peerRecord struct { + id peer.ID + + // Number of sequential failures since the last successful request. sequentialFailures int // Sliding windows of hits/misses (0-3 each). If either would exceed 3, we subtract 1 from @@ -45,6 +53,8 @@ type peerRecord struct { hits, misses int state peerState + + lastSeen time.Time } type backoffHeap []*backoffRecord @@ -62,9 +72,7 @@ func newPeerTracker() *peerTracker { } type peerTracker struct { - // TODO: garbage collect this. - peers map[peer.ID]*peerRecord - // TODO: Limit the number of active peers. 
+ peers map[peer.ID]*peerRecord active []peer.ID backoff backoffHeap lastHitRound, currentRound int @@ -136,6 +144,7 @@ func (r *peerRecord) recordFailure() int { func (r *peerRecord) recordHit() { r.sequentialFailures = 0 + r.lastSeen = time.Now() if r.hits < hitMissSlidingWindow { r.hits++ } else if r.misses > 0 { @@ -145,6 +154,7 @@ func (r *peerRecord) recordHit() { func (r *peerRecord) recordMiss() { r.sequentialFailures = 0 + r.lastSeen = time.Now() if r.misses < hitMissSlidingWindow { r.misses++ } else if r.hits > 0 { @@ -152,6 +162,12 @@ func (r *peerRecord) recordMiss() { } } +func (r *peerRecord) recordInvalid() { + r.state = peerEvil + r.sequentialFailures = 0 + r.lastSeen = time.Now() +} + // Return the hit rate a number between 0-10 indicating how "full" our window is. func (r *peerRecord) hitRate() (float64, int) { total := r.hits + r.misses @@ -167,14 +183,15 @@ func (r *peerRecord) hitRate() (float64, int) { func (t *peerTracker) getOrCreate(p peer.ID) *peerRecord { r, ok := t.peers[p] if !ok { - r = new(peerRecord) + now := clk.Now() + r = &peerRecord{id: p, lastSeen: now} t.peers[p] = r } return r } func (t *peerTracker) recordInvalid(p peer.ID) { - t.getOrCreate(p).state = peerEvil + t.getOrCreate(p).recordInvalid() } func (t *peerTracker) recordMiss(p peer.ID) { @@ -195,8 +212,13 @@ func (t *peerTracker) recordHit(p peer.ID) { t.getOrCreate(p).recordHit() } -func (t *peerTracker) makeActive(p peer.ID) { - r := t.getOrCreate(p) +// Reactivate a peer from backoff. +func (t *peerTracker) reactivate(p peer.ID) { + r, ok := t.peers[p] + if !ok { + // If we're not still tracking this peer, just forget about them. + return + } switch r.state { case peerEvil, peerActive: return @@ -208,10 +230,90 @@ func (t *peerTracker) makeActive(p peer.ID) { } func (t *peerTracker) peerSeen(p peer.ID) { - if _, ok := t.peers[p]; !ok { - t.peers[p] = &peerRecord{state: peerActive, lastSeen: clk.Now()} + now := clk.Now() + if r, ok := t.peers[p]; !ok { + t.peers[p] = &peerRecord{id: p, state: peerActive, lastSeen: now} t.active = append(t.active, p) + t.maybeGc() + } else { + r.lastSeen = now + } +} + +// Garbage collect peers down to our "low" water mark (1000) +func (t *peerTracker) maybeGc() { + if len(t.peers) < gcHighWater { + return + } + eligable := make([]*peerRecord, 0, len(t.peers)) + + for _, r := range t.peers { + eligable = append(eligable, r) + } + + slices.SortFunc(eligable, func(a, b *peerRecord) int { + // Evil peers always sort later. + if (a.state == peerEvil) != (b.state == peerEvil) { + return cmp.Compare(b.state, a.state) + } + + // Peers with 3+ sequential failures are considered "worse" than all peers with + // fewer failures. + if (a.sequentialFailures < gcFailureCuttoff) != (b.sequentialFailures < gcFailureCuttoff) { + return cmp.Compare(a.sequentialFailures, b.sequentialFailures) + } + + // Then compare success metrics. + + hitRateA, hitCountA := a.hitRate() + hitRateB, hitCountB := b.hitRate() + + // If exactly one of the peers has a 0 hit-rate, it's always worse than the other. + if (hitRateA == 0) != (hitRateB == 0) { + return cmp.Compare(hitRateB, hitRateA) + } + + // If we have no information on one peer but have information on the other, prefer + // the peer we have information on. + if (hitCountA == 0) != (hitCountB == 0) { + return cmp.Compare(hitCountB, hitCountA) + } + + // Otherwise, pick the peer with the higher hit rate. + if c := cmp.Compare(hitRateB, hitRateA); c != 0 { + return c + } + + // Otherwise, pick the peer with more information. 
+ if c := cmp.Compare(hitCountB, hitCountA); c != 0 { + return c + } + + // Finally, exclude the peer with the most request failures. + if c := cmp.Compare(a.sequentialFailures, b.sequentialFailures); c != 0 { + return c + } + + return 0 + }) + + // gc + for _, r := range eligable[gcLowWater:] { + delete(t.peers, r.id) } + + // filter active peers + t.active = slices.DeleteFunc(t.active, func(p peer.ID) bool { + _, ok := t.peers[p] + return !ok + }) + + // filter backoff + t.backoff = slices.DeleteFunc(t.backoff, func(r *backoffRecord) bool { + _, ok := t.peers[r.peer] + return !ok + }) + heap.Init(&t.backoff) } // Advance the round and move peers from backoff to active, if necessary. @@ -222,32 +324,40 @@ func (t *peerTracker) advanceRound() { break } heap.Pop(&t.backoff) - t.makeActive(r.peer) + t.reactivate(r.peer) } t.currentRound++ } -// Suggest a number of peers from which to request new certificates based on their historical -// record. -func (t *peerTracker) suggestPeers() []peer.ID { - t.advanceRound() - +// Re-rank peers. +func (t *peerTracker) rank() { // Sort from best to worst. slices.SortFunc(t.active, func(a, b peer.ID) int { return t.getOrCreate(b).Cmp(t.getOrCreate(a)) }) + // Trim off any inactive/evil peers from the end, they'll be sorted last. + activePeers := len(t.active) trimLoop: - for l := len(t.active); l > 0; l-- { - r := t.getOrCreate(t.active[l-1]) + for ; activePeers > 0; activePeers-- { + r := t.getOrCreate(t.active[activePeers-1]) switch r.state { case peerActive: break trimLoop case peerDeactivating: r.state = peerInactive } - t.active = t.active[:l-1] } + clear(t.active[activePeers:]) + t.active = t.active[:activePeers] +} + +// Suggest a number of peers from which to request new certificates based on their historical +// record. +func (t *peerTracker) suggestPeers() []peer.ID { + t.advanceRound() + t.maybeGc() + t.rank() // Adjust the minimum peer count and probability threshold based on the current distance to // the last successful round (capped at 8 rounds). diff --git a/certexchange/polling/peerTracker_test.go b/certexchange/polling/peerTracker_test.go index 34365ba8..454671b9 100644 --- a/certexchange/polling/peerTracker_test.go +++ b/certexchange/polling/peerTracker_test.go @@ -166,7 +166,7 @@ func TestPeerTracker(t *testing.T) { // We always pick at least 4 peers, even if we have high confidence in one. { suggested := pt.suggestPeers() - require.Len(t, suggested, 4) + require.Len(t, suggested, minRequests) require.Equal(t, peers[0], suggested[0]) } @@ -180,12 +180,24 @@ func TestPeerTracker(t *testing.T) { pt.peerSeen(peers[0]) } + pt.recordHit(peers[1]) + require.Contains(t, pt.suggestPeers(), peers[1]) + pt.lastHitRound = pt.currentRound + + // Discover a whole bunch of peers. + discoverPeers(gcHighWater) + + // We should have garbage collected + require.Less(t, len(pt.peers), gcHighWater) + + { + // The new peers shouldn't affect out discovered peers. + require.Contains(t, pt.suggestPeers(), peers[1]) + } + // We should never suggest more than 32 peers at a time. { - // Now, add a bunch of peers. - discoverPeers(100) - // And treat them all as "bad". 
- for _, p := range peers { + for p := range pt.peers { for i := 0; i < 10; i++ { pt.recordMiss(p) } @@ -193,4 +205,5 @@ require.Len(t, pt.suggestPeers(), maxRequests) } + } From f78af6e541b4f2af55d977b9a572d73a41a9464d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 16:20:31 +0200 Subject: [PATCH 36/43] latency tracking --- certexchange/polling/peerTracker.go | 31 ++++++++++++++---- certexchange/polling/poller.go | 39 ++++++++++++++++------- certexchange/polling/poller_test.go | 12 +++---- certexchange/polling/pollresult_string.go | 4 +-- certexchange/polling/subscriber.go | 6 ++-- 5 files changed, 64 insertions(+), 28 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index b356c8cf..58f4f438 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -26,6 +26,9 @@ const ( gcHighWater = 10000 // GC the oldest peers when we have 10k gcLowWater = 1000 // GC down to 1000 peers gcFailureCuttoff = 3 // Ignore failure counts below N when garbage collecting. + + // EWMA alpha for latency tracking (0-1, larger values favor newer readings) + latencyAlpha = 0.7 ) type peerState int @@ -40,10 +43,8 @@ const ( // TODO: Track latency and connectedness. type peerRecord struct { id peer.ID - // Number of sequential failures since the last successful request. sequentialFailures int - // Sliding windows of hits/misses (0-3 each). If either would exceed 3, we subtract 1 from // both (where 0 is the floor). // // - We don't use a simple weighted moving average because that doesn't track how "sure" we // are of the measurement. hits, misses int - - state peerState - - lastSeen time.Time + state peerState + lastSeen time.Time + latency time.Duration } type backoffHeap []*backoffRecord @@ -88,6 +88,13 @@ func (r *peerRecord) Cmp(other *peerRecord) int { if c := cmp.Compare(rateA, rateB); c != 0 { return c } + + // If we have latency measurements for both, prefer the peer with lower latency.
+ if r.latency > 0 && other.latency > 0 { + if c := cmp.Compare(other.latency, r.latency); c != 0 { + return c + } + } if c := cmp.Compare(countA, countB); c != 0 { return c } @@ -142,6 +149,14 @@ func (r *peerRecord) recordFailure() int { return delay } +func (r *peerRecord) updateLatency(d time.Duration) { + if r.latency > 0 { + r.latency += time.Duration(latencyAlpha * float64(d-r.latency)) + } else { + r.latency = d + } +} + func (r *peerRecord) recordHit() { r.sequentialFailures = 0 r.lastSeen = time.Now() @@ -190,6 +205,10 @@ func (t *peerTracker) getOrCreate(p peer.ID) *peerRecord { return r } +func (t *peerTracker) updateLatency(p peer.ID, d time.Duration) { + t.getOrCreate(p).updateLatency(d) +} + func (t *peerTracker) recordInvalid(p peer.ID) { t.getOrCreate(p).recordInvalid() } diff --git a/certexchange/polling/poller.go b/certexchange/polling/poller.go index 59d7de54..1ff6fb10 100644 --- a/certexchange/polling/poller.go +++ b/certexchange/polling/poller.go @@ -3,6 +3,7 @@ package polling import ( "context" + "time" "github.com/filecoin-project/go-f3/certexchange" "github.com/filecoin-project/go-f3/certs" @@ -40,16 +41,23 @@ func NewPoller(ctx context.Context, client *certexchange.Client, store *certstor }, nil } -type PollResult int +type PollResult struct { + Status PollStatus + Progress uint64 + Latency time.Duration + Error error +} + +type PollStatus int const ( - PollMiss PollResult = iota + PollMiss PollStatus = iota PollHit PollFailed PollIllegal ) -func (p PollResult) GoString() string { +func (p PollStatus) GoString() string { return p.String() } @@ -82,28 +90,32 @@ func (p *Poller) CatchUp(ctx context.Context) (uint64, error) { // // 1. A PollResult indicating the outcome: miss, hit, failed, illegal. // 2. An error if something went wrong internally (e.g., the certificate store returned an error). -func (p *Poller) Poll(ctx context.Context, peer peer.ID) (PollResult, error) { - var result PollResult +func (p *Poller) Poll(ctx context.Context, peer peer.ID) (*PollResult, error) { + res := new(PollResult) for { // Requests take time, so always try to catch-up between requests in case there has // been some "local" action from the GPBFT instance. if _, err := p.CatchUp(ctx); err != nil { - return PollFailed, err + return nil, err } + start := clk.Now() resp, ch, err := p.Request(ctx, peer, &certexchange.Request{ FirstInstance: p.NextInstance, Limit: maxRequestLength, IncludePowerTable: false, }) + res.Latency = clk.Since(start) if err != nil { - return PollFailed, nil + res.Status = PollFailed + res.Error = err + return res, nil } // If they're caught up, record it as a hit. Otherwise, if they have nothing // to give us, move on. if resp.PendingInstance >= p.NextInstance { - result = PollHit + res.Status = PollHit } received := 0 @@ -114,24 +126,27 @@ func (p *Poller) Poll(ctx context.Context, peer peer.ID) (PollResult, error) { *cert, ) if err != nil { - return PollIllegal, nil + res.Status = PollIllegal + return res, nil } if err := p.Store.Put(ctx, cert); err != nil { - return PollHit, err + return nil, err } p.NextInstance = next p.PowerTable = pt received++ } + res.Progress += uint64(received) // Try again if they're claiming to have more instances (and gave me at // least one). if resp.PendingInstance <= p.NextInstance { - return result, nil + return res, nil } else if received == 0 { + res.Status = PollFailed // If they give me no certificates but claim to have more, treat this as a // failure (could be a connection failure, etc). 
- return PollFailed, nil + return res, nil } } diff --git a/certexchange/polling/poller_test.go b/certexchange/polling/poller_test.go index 93d6e12a..6b9a1c93 100644 --- a/certexchange/polling/poller_test.go +++ b/certexchange/polling/poller_test.go @@ -83,7 +83,7 @@ func TestPoller(t *testing.T) { for i := 0; i < 2; i++ { res, err := poller.Poll(ctx, serverHost.ID()) require.NoError(t, err) - require.Equal(t, polling.PollHit, res) + require.Equal(t, polling.PollHit, res.Status) require.Equal(t, uint64(certificatesAdded), poller.NextInstance) } @@ -93,7 +93,7 @@ func TestPoller(t *testing.T) { res, err := poller.Poll(ctx, serverHost.ID()) require.NoError(t, err) - require.Equal(t, polling.PollMiss, res) + require.Equal(t, polling.PollMiss, res.Status) } // Add that cert to the server. @@ -104,7 +104,7 @@ func TestPoller(t *testing.T) { { res, err := poller.Poll(ctx, serverHost.ID()) require.NoError(t, err) - require.Equal(t, polling.PollHit, res) + require.Equal(t, polling.PollHit, res.Status) } // Add more than the request maximum (up till the last cert) @@ -116,7 +116,7 @@ func TestPoller(t *testing.T) { { res, err := poller.Poll(ctx, serverHost.ID()) require.NoError(t, err) - require.Equal(t, polling.PollHit, res) + require.Equal(t, polling.PollHit, res.Status) require.Equal(t, uint64(certificatesAdded), poller.NextInstance) } @@ -128,7 +128,7 @@ func TestPoller(t *testing.T) { res, err := poller.Poll(ctx, serverHost.ID()) require.NoError(t, err) - require.Equal(t, polling.PollIllegal, res) + require.Equal(t, polling.PollIllegal, res.Status) // And we don't store certificates from them! require.Equal(t, uint64(certificatesAdded), poller.NextInstance) @@ -142,6 +142,6 @@ func TestPoller(t *testing.T) { { res, err := poller.Poll(ctx, serverHost.ID()) require.NoError(t, err) - require.Equal(t, polling.PollFailed, res) + require.Equal(t, polling.PollFailed, res.Status) } } diff --git a/certexchange/polling/pollresult_string.go b/certexchange/polling/pollresult_string.go index b7e18e00..c954c656 100644 --- a/certexchange/polling/pollresult_string.go +++ b/certexchange/polling/pollresult_string.go @@ -18,8 +18,8 @@ const _PollResult_name = "PollMissPollHitPollFailedPollIllegal" var _PollResult_index = [...]uint8{0, 8, 15, 25, 36} -func (i PollResult) String() string { - if i < 0 || i >= PollResult(len(_PollResult_index)-1) { +func (i PollStatus) String() string { + if i < 0 || i >= PollStatus(len(_PollResult_index)-1) { return "PollResult(" + strconv.FormatInt(int64(i), 10) + ")" } return _PollResult_name[_PollResult_index[i]:_PollResult_index[i+1]] diff --git a/certexchange/polling/subscriber.go b/certexchange/polling/subscriber.go index 6e31ada5..5a83e854 100644 --- a/certexchange/polling/subscriber.go +++ b/certexchange/polling/subscriber.go @@ -139,18 +139,20 @@ func (s *Subscriber) poll(ctx context.Context) (uint64, error) { if err != nil { return s.poller.NextInstance - start, err } - s.Log.Debugf("polled %s for instance %d, got %s", peer, s.poller.NextInstance, res) + s.Log.Debugf("polled %s for instance %d, got %+v", peer, s.poller.NextInstance, res) // If we manage to advance, all old "hits" are actually misses. if oldInstance < s.poller.NextInstance { misses = append(misses, hits...) 
hits = hits[:0] } - switch res { + switch res.Status { case PollMiss: misses = append(misses, peer) + s.peerTracker.updateLatency(peer, res.Latency) case PollHit: hits = append(hits, peer) + s.peerTracker.updateLatency(peer, res.Latency) case PollFailed: s.peerTracker.recordFailure(peer) case PollIllegal: From e4086e2d1b188015f20948b579c19f3ea366dfcc Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 17:20:18 +0200 Subject: [PATCH 37/43] improve reliability of subscribe tests --- certexchange/polling/subscriber_test.go | 81 ++++++++++--------------- 1 file changed, 31 insertions(+), 50 deletions(-) diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go index 867fef24..a8ad183d 100644 --- a/certexchange/polling/subscriber_test.go +++ b/certexchange/polling/subscriber_test.go @@ -81,60 +81,41 @@ func TestSubscriber(t *testing.T) { require.NoError(t, mocknet.ConnectAllButSelf()) - for _, s := range servers { - require.NoError(t, s.Store.Put(ctx, certificates[0])) - } - - polling.MockClock.Add(100 * time.Millisecond) - - require.Eventually(t, func() bool { - return clientCs.Latest() != nil - }, time.Second, 10*time.Millisecond) - - require.Equal(t, uint64(0), clientCs.Latest().GPBFTInstance) - - // Slowly drop servers from the network liveServers := slices.Clone(servers) - - // Let the network run fine for a few rounds. - nextInstance := uint64(1) - for ; nextInstance < 10; nextInstance++ { + for i := 0; len(liveServers) > 0; i++ { for _, s := range liveServers { - require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) + require.NoError(t, s.Store.Put(ctx, certificates[i])) } - i := 0 - require.Eventually(t, func() bool { - i += 10 - polling.MockClock.Add(10 * time.Millisecond) - return clientCs.Latest().GPBFTInstance == nextInstance - }, 10*time.Second, time.Millisecond) - - polling.MockClock.Add(time.Duration(max(0, 100-i)) * time.Millisecond) - } - - // Then kill 20% of the network every three instances. - for ; len(liveServers) > 0; nextInstance++ { - for _, s := range liveServers { - require.NoError(t, s.Store.Put(ctx, certificates[nextInstance])) + if i < 10 { + // We put the first 10 certificates regularly and don't expect any + // variation in timing. + polling.MockClock.Add(100 * time.Millisecond) + require.Eventually(t, func() bool { + latest := clientCs.Latest() + return latest != nil && latest.GPBFTInstance == uint64(i) + }, 10*time.Second, time.Millisecond) + } else { + // After we settle for a bit, every 4 instances, stop updating 20% of the + // network. We now do expect some variation in timing so we can't just wait + // 100ms each time. + polling.MockClock.WaitForAllTimers() + + require.Eventually(t, func() bool { + latest := clientCs.Latest() + found := latest != nil && latest.GPBFTInstance == uint64(i) + if !found { + polling.MockClock.WaitForAllTimers() + } + return found + }, 10*time.Second, time.Millisecond) + + if i%4 == 0 { + rand.Shuffle(len(liveServers), func(a, b int) { + liveServers[a], liveServers[b] = liveServers[b], liveServers[a] + }) + liveServers = liveServers[:8*len(liveServers)/10] + } } - - i := 0 - require.Eventually(t, func() bool { - i += 10 - polling.MockClock.Add(10 * time.Millisecond) - return clientCs.Latest().GPBFTInstance == uint64(nextInstance) - }, 10*time.Second, time.Millisecond) - - polling.MockClock.Add(time.Duration(max(0, 100-i)) * time.Millisecond) - - // Every 4 instances, stop updating 20% of the network. 
- if nextInstance%4 == 0 { - rand.Shuffle(len(liveServers), func(a, b int) { - liveServers[a], liveServers[b] = liveServers[b], liveServers[a] - }) - liveServers = liveServers[:8*len(liveServers)/10] - } - } } From dd747a7215ae2960fb45878e855d9a93ae25dd3a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 17:25:43 +0200 Subject: [PATCH 38/43] fix comment --- certexchange/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/certexchange/client.go b/certexchange/client.go index fe664b7a..cf4ac4be 100644 --- a/certexchange/client.go +++ b/certexchange/client.go @@ -60,7 +60,7 @@ func (c *Client) Request(ctx context.Context, p peer.ID, req *Request) (_rh *Res if err != nil { return nil, nil, err } - // Reset the stream if the parent context is canceled. We never call the returned cancel + // Reset the stream if the parent context is canceled. We never call the returned stop // function because we call the cancel function returned by `withDeadline` (which cancels // the entire context tree). context.AfterFunc(ctx, func() { _ = stream.Reset() }) From d0f2fd6f6fb983a986f4a069bdf7712e85f70ff8 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 17:28:24 +0200 Subject: [PATCH 39/43] fix build --- certexchange/polling/poller.go | 2 +- certexchange/polling/pollresult_string.go | 26 ----------------------- certexchange/polling/pollstatus_string.go | 26 +++++++++++++++++++++++ 3 files changed, 27 insertions(+), 27 deletions(-) delete mode 100644 certexchange/polling/pollresult_string.go create mode 100644 certexchange/polling/pollstatus_string.go diff --git a/certexchange/polling/poller.go b/certexchange/polling/poller.go index 1ff6fb10..99060ff4 100644 --- a/certexchange/polling/poller.go +++ b/certexchange/polling/poller.go @@ -1,4 +1,4 @@ -//go:generate go run golang.org/x/tools/cmd/stringer -type=PollResult +//go:generate go run golang.org/x/tools/cmd/stringer -type=PollStatus package polling import ( diff --git a/certexchange/polling/pollresult_string.go b/certexchange/polling/pollresult_string.go deleted file mode 100644 index c954c656..00000000 --- a/certexchange/polling/pollresult_string.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by "stringer -type=PollResult"; DO NOT EDIT. - -package polling - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[PollMiss-0] - _ = x[PollHit-1] - _ = x[PollFailed-2] - _ = x[PollIllegal-3] -} - -const _PollResult_name = "PollMissPollHitPollFailedPollIllegal" - -var _PollResult_index = [...]uint8{0, 8, 15, 25, 36} - -func (i PollStatus) String() string { - if i < 0 || i >= PollStatus(len(_PollResult_index)-1) { - return "PollResult(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _PollResult_name[_PollResult_index[i]:_PollResult_index[i+1]] -} diff --git a/certexchange/polling/pollstatus_string.go b/certexchange/polling/pollstatus_string.go new file mode 100644 index 00000000..667cd6a7 --- /dev/null +++ b/certexchange/polling/pollstatus_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=PollStatus"; DO NOT EDIT. + +package polling + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[PollMiss-0] + _ = x[PollHit-1] + _ = x[PollFailed-2] + _ = x[PollIllegal-3] +} + +const _PollStatus_name = "PollMissPollHitPollFailedPollIllegal" + +var _PollStatus_index = [...]uint8{0, 8, 15, 25, 36} + +func (i PollStatus) String() string { + if i < 0 || i >= PollStatus(len(_PollStatus_index)-1) { + return "PollStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PollStatus_name[_PollStatus_index[i]:_PollStatus_index[i+1]] +} From 8f386bf86cc4def4a57d848e7f46f2ed27c613e2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 18:52:51 +0200 Subject: [PATCH 40/43] spelling --- certexchange/polling/peerTracker.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index 58f4f438..fb3ef213 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -264,13 +264,13 @@ func (t *peerTracker) maybeGc() { if len(t.peers) < gcHighWater { return } - eligable := make([]*peerRecord, 0, len(t.peers)) + eligible := make([]*peerRecord, 0, len(t.peers)) for _, r := range t.peers { - eligable = append(eligable, r) + eligible = append(eligible, r) } - slices.SortFunc(eligable, func(a, b *peerRecord) int { + slices.SortFunc(eligible, func(a, b *peerRecord) int { // Evil peers always sort later. if (a.state == peerEvil) != (b.state == peerEvil) { return cmp.Compare(b.state, a.state) @@ -317,7 +317,7 @@ func (t *peerTracker) maybeGc() { }) // gc - for _, r := range eligable[gcLowWater:] { + for _, r := range eligible[gcLowWater:] { delete(t.peers, r.id) } From 6dd8e2037439085bcbbeb211ae47f5018acaaf78 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 19:08:13 +0200 Subject: [PATCH 41/43] fix heap usage --- certexchange/polling/peerTracker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/certexchange/polling/peerTracker.go b/certexchange/polling/peerTracker.go index fb3ef213..606f6ec9 100644 --- a/certexchange/polling/peerTracker.go +++ b/certexchange/polling/peerTracker.go @@ -338,7 +338,7 @@ func (t *peerTracker) maybeGc() { // Advance the round and move peers from backoff to active, if necessary. func (t *peerTracker) advanceRound() { for t.backoff.Len() > 0 { - r := t.backoff[len(t.backoff)-1] + r := t.backoff[0] if r.delayUntil > t.currentRound { break } From f0ab0fa18fa9d072d1ab6cf657cffc9e191b194c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 19:08:30 +0200 Subject: [PATCH 42/43] test heap --- certexchange/polling/peerTracker_test.go | 47 ++++++++++++++++++++---- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/certexchange/polling/peerTracker_test.go b/certexchange/polling/peerTracker_test.go index 454671b9..19186d8a 100644 --- a/certexchange/polling/peerTracker_test.go +++ b/certexchange/polling/peerTracker_test.go @@ -136,19 +136,50 @@ func TestPeerTracker(t *testing.T) { // Now ensure we select that peer. It should be first because it's the best. pt.recordHit(peers[0]) + pt.recordHit(peers[0]) + + pt.recordHit(peers[1]) + pt.recordMiss(peers[1]) // needs to be worse than peer 1 + require.Equal(t, pt.suggestPeers()[0], peers[0]) // Now check to make sure we backoff that peer. - pt.recordFailure(peers[0]) - require.NotContains(t, pt.suggestPeers(), peers[0]) - // Should only last one round the first time. 
- require.Equal(t, pt.suggestPeers()[0], peers[0]) + { + pt.recordFailure(peers[0]) + pt.recordMiss(peers[1]) + suggested := pt.suggestPeers() + require.Equal(t, 1, pt.backoff.Len()) + require.NotContains(t, suggested, peers[0]) + require.Equal(t, suggested[0], peers[1]) + + // Peer 0 should be back on-top after one round absent. + suggested = pt.suggestPeers() + require.Empty(t, pt.backoff) + require.Equal(t, suggested[0], peers[0]) + } // Should last two rounds the second time (exponential). - pt.recordFailure(peers[0]) - require.NotContains(t, pt.suggestPeers(), peers[0]) - require.NotContains(t, pt.suggestPeers(), peers[0]) - require.Equal(t, pt.suggestPeers()[0], peers[0]) + + { + pt.recordFailure(peers[0]) + pt.recordFailure(peers[1]) + + suggested := pt.suggestPeers() + require.Equal(t, 2, pt.backoff.Len()) + require.NotContains(t, suggested, peers[0]) + require.NotContains(t, suggested, peers[1]) + + suggested = pt.suggestPeers() + require.Equal(t, 1, pt.backoff.Len()) + require.Contains(t, suggested, peers[1]) + require.NotContains(t, suggested, peers[0]) + + suggested = pt.suggestPeers() + require.Len(t, pt.backoff, 0) + require.Contains(t, pt.active, peers[0]) + require.Contains(t, suggested, peers[0]) + require.Contains(t, suggested, peers[1]) + } // Then four rounds. pt.recordFailure(peers[0]) From c0508022717a42d8243c2cb4b8a01802f7f7425a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Jul 2024 20:03:44 +0200 Subject: [PATCH 43/43] make the subscribe test pass more reliably --- certexchange/polling/subscriber_test.go | 40 ++++++++----------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/certexchange/polling/subscriber_test.go b/certexchange/polling/subscriber_test.go index a8ad183d..4e87832d 100644 --- a/certexchange/polling/subscriber_test.go +++ b/certexchange/polling/subscriber_test.go @@ -87,35 +87,19 @@ func TestSubscriber(t *testing.T) { require.NoError(t, s.Store.Put(ctx, certificates[i])) } - if i < 10 { - // We put the first 10 certificates regularly and don't expect any - // variation in timing. - polling.MockClock.Add(100 * time.Millisecond) - require.Eventually(t, func() bool { - latest := clientCs.Latest() - return latest != nil && latest.GPBFTInstance == uint64(i) - }, 10*time.Second, time.Millisecond) - } else { - // After we settle for a bit, every 4 instances, stop updating 20% of the - // network. We now do expect some variation in timing so we can't just wait - // 100ms each time. + require.Eventually(t, func() bool { polling.MockClock.WaitForAllTimers() - - require.Eventually(t, func() bool { - latest := clientCs.Latest() - found := latest != nil && latest.GPBFTInstance == uint64(i) - if !found { - polling.MockClock.WaitForAllTimers() - } - return found - }, 10*time.Second, time.Millisecond) - - if i%4 == 0 { - rand.Shuffle(len(liveServers), func(a, b int) { - liveServers[a], liveServers[b] = liveServers[b], liveServers[a] - }) - liveServers = liveServers[:8*len(liveServers)/10] - } + latest := clientCs.Latest() + return latest != nil && latest.GPBFTInstance == uint64(i) + }, 10*time.Second, time.Millisecond) + + // After we settle for a bit, every 4 instances, stop updating 20% of the + // network. + if i > 10 && i%4 == 0 { + rand.Shuffle(len(liveServers), func(a, b int) { + liveServers[a], liveServers[b] = liveServers[b], liveServers[a] + }) + liveServers = liveServers[:8*len(liveServers)/10] } } }
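Aside, not part of the patch series itself: the timing-sensitive tests above all rely on the injectable clock introduced in the "use a mock clock" commit. Production code reads the package-level clk (a real clock by default, see certexchange/polling/common.go), while the test init() swaps in clock.NewMock() and advances virtual time explicitly via Add or WaitForAllTimers. Below is a minimal, illustrative sketch of that pattern, assuming github.com/benbjohnson/clock v1.3.x as pulled in by the go.mod change; it is not code from the patches.

package main

import (
	"fmt"
	"time"

	"github.com/benbjohnson/clock"
)

// Package-level clock in the style of certexchange/polling/common.go:
// the real clock by default, swapped for a mock by tests.
var clk clock.Clock = clock.New()

func main() {
	mock := clock.NewMock()
	clk = mock // what the polling tests do in their init()

	// A subscriber-style wait: a timer created from the injected clock.
	timer := clk.Timer(100 * time.Millisecond)
	defer timer.Stop()

	done := make(chan struct{})
	go func() {
		// Blocks until the mock clock reaches the timer's deadline.
		fmt.Println("poll timer fired at mock time:", <-timer.C)
		close(done)
	}()

	// Nothing fires until virtual time is advanced; the test decides when the
	// next "poll" happens instead of sleeping.
	mock.Add(100 * time.Millisecond)
	<-done
}

Because the timer only fires when the mock clock is advanced past its deadline, tests like TestSubscriber can drive polling deterministically, which is what the MockClock.Add and MockClock.WaitForAllTimers calls in the later patches do.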