https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L321-L347
func (n *NetworkTransport) getConn(target ServerAddress) (*netConn, error) {
	// Check for a pooled conn
	if conn := n.getPooledConn(target); conn != nil {
		return conn, nil
	}

	// Dial a new connection
	conn, err := n.stream.Dial(target, n.timeout)
	if err != nil {
		return nil, err
	}

	// Wrap the conn
	netConn := &netConn{
		target: target,
		conn:   conn,
		r:      bufio.NewReader(conn),
		w:      bufio.NewWriter(conn),
	}

	// Setup encoder/decoders
	netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{})
	netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{})

	// Done
	return netConn, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/snapshot/v3_snapshot.go#L72-L77
func NewV3(lg *zap.Logger) Manager {
	if lg == nil {
		lg = zap.NewExample()
	}
	return &v3Manager{lg: lg}
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L366-L375
func (n *NetworkTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) {
	// Get a connection
	conn, err := n.getConnFromAddressProvider(id, target)
	if err != nil {
		return nil, err
	}

	// Create the pipeline
	return newNetPipeline(n, conn), nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/stm.go#L208-L214
func (rs readSet) cmps() []v3.Cmp {
	cmps := make([]v3.Cmp, 0, len(rs))
	for k, rk := range rs {
		cmps = append(cmps, isKeyCurrent(k, rk))
	}
	return cmps
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/grpc1.7-health.go#L269-L277
func (b *GRPC17Health) NeedUpdate() bool {
	// updating notifyCh can trigger new connections,
	// need update addrs if all connections are down
	// or addrs does not include pinAddr.
	b.mu.RLock()
	update := !hasAddr(b.addrs, b.pinAddr)
	b.mu.RUnlock()
	return update
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/level_handler.go#L105-L139
func (s *levelHandler) replaceTables(toDel, toAdd []*table.Table) error {
	// Need to re-search the range of tables in this level to be replaced as other goroutines might
	// be changing it as well. (They can't touch our tables, but if they add/remove other tables,
	// the indices get shifted around.)
	s.Lock() // We s.Unlock() below.

	toDelMap := make(map[uint64]struct{})
	for _, t := range toDel {
		toDelMap[t.ID()] = struct{}{}
	}

	var newTables []*table.Table
	for _, t := range s.tables {
		_, found := toDelMap[t.ID()]
		if !found {
			newTables = append(newTables, t)
			continue
		}
		s.totalSize -= t.Size()
	}

	// Increase totalSize first.
	for _, t := range toAdd {
		s.totalSize += t.Size()
		t.IncrRef()
		newTables = append(newTables, t)
	}

	// Assign tables.
	s.tables = newTables
	sort.Slice(s.tables, func(i, j int) bool {
		return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
	})
	s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow.
	return decrRefs(toDel)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L198-L203
func (rn *RawNode) Ready() Ready {
	rd := rn.newReady()
	rn.raft.msgs = nil
	rn.raft.reduceUncommittedSize(rd.CommittedEntries)
	return rd
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/cluster_util.go#L35-L49
func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
	rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt)
	if err != nil {
		return false
	}
	id := cl.MemberByName(member).ID
	m := rcl.Member(id)
	if m == nil {
		return false
	}
	if len(m.ClientURLs) > 0 {
		return true
	}
	return false
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/raft.go#L401-L404
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/backend/batch_tx.go#L207-L211
func (t *batchTx) Commit() {
	t.Lock()
	t.commit(false)
	t.Unlock()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L120-L131
func compareMajorMinorVersion(a, b *semver.Version) int {
	na := &semver.Version{Major: a.Major, Minor: a.Minor}
	nb := &semver.Version{Major: b.Major, Minor: b.Minor}
	switch {
	case na.LessThan(*nb):
		return -1
	case nb.LessThan(*na):
		return 1
	default:
		return 0
	}
}
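For illustration, a minimal sketch of how the helper above behaves, assuming it is copied alongside a main package and the coreos go-semver library is available; because only major and minor are compared, differing patch versions compare equal:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	a := semver.New("3.4.1")
	b := semver.New("3.4.9")
	fmt.Println(compareMajorMinorVersion(a, b)) // 0: patch versions are ignored
	c := semver.New("3.5.0")
	fmt.Println(compareMajorMinorVersion(a, c)) // -1: 3.4 < 3.5
}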
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/observer.go#L60-L67
func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer {
	return &Observer{
		channel:  channel,
		blocking: blocking,
		filter:   filter,
		id:       atomic.AddUint64(&nextObserverID, 1),
	}
}
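A hypothetical usage sketch (the running *raft.Raft value r is assumed): non-blocking mode means observations are dropped rather than stalling raft's main loop when the channel is full.

// Watch for leader changes only; other observation types are filtered out.
obsCh := make(chan raft.Observation, 64)
observer := raft.NewObserver(obsCh, false, func(o *raft.Observation) bool {
	_, ok := o.Data.(raft.LeaderObservation)
	return ok
})
r.RegisterObserver(observer)
defer r.DeregisterObserver(observer)

go func() {
	for o := range obsCh {
		lo := o.Data.(raft.LeaderObservation)
		log.Printf("leader changed: %v", lo.Leader)
	}
}()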
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/gw/rpc.pb.gw.go#L888-L890
func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn))
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/fileutil/fileutil.go#L48-L58
func TouchDirAll(dir string) error {
	// If path is already a directory, MkdirAll does nothing
	// and returns nil.
	err := os.MkdirAll(dir, PrivateDirMode)
	if err != nil {
		// if mkdirAll("a/text") and "text" is not
		// a directory, this will return syscall.ENOTDIR
		return err
	}
	return IsDirWriteable(dir)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/leasing/kv.go#L186-L197
func (lkv *leasingKV) rescind(ctx context.Context, key string, rev int64) {
	if lkv.leases.Evict(key) > rev {
		return
	}
	cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev)
	op := v3.OpDelete(lkv.pfx + key)
	for ctx.Err() == nil {
		if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil {
			return
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/repair.go#L30-L131
func Repair(lg *zap.Logger, dirpath string) bool {
	f, err := openLast(lg, dirpath)
	if err != nil {
		return false
	}
	defer f.Close()

	if lg != nil {
		lg.Info("repairing", zap.String("path", f.Name()))
	} else {
		plog.Noticef("repairing %v", f.Name())
	}

	rec := &walpb.Record{}
	decoder := newDecoder(f)
	for {
		lastOffset := decoder.lastOffset()
		err := decoder.decode(rec)
		switch err {
		case nil:
			// update crc of the decoder when necessary
			switch rec.Type {
			case crcType:
				crc := decoder.crc.Sum32()
				// current crc of decoder must match the crc of the record.
				// no need to match a 0 crc, since the decoder is a new one in this case.
				if crc != 0 && rec.Validate(crc) != nil {
					return false
				}
				decoder.updateCRC(rec.Crc)
			}
			continue

		case io.EOF:
			if lg != nil {
				lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.EOF))
			}
			return true

		case io.ErrUnexpectedEOF:
			bf, bferr := os.Create(f.Name() + ".broken")
			if bferr != nil {
				if lg != nil {
					lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr))
				} else {
					plog.Errorf("could not repair %v, failed to create backup file", f.Name())
				}
				return false
			}
			defer bf.Close()

			if _, err = f.Seek(0, io.SeekStart); err != nil {
				if lg != nil {
					lg.Warn("failed to read file", zap.String("path", f.Name()), zap.Error(err))
				} else {
					plog.Errorf("could not repair %v, failed to read file", f.Name())
				}
				return false
			}

			if _, err = io.Copy(bf, f); err != nil {
				if lg != nil {
					lg.Warn("failed to copy", zap.String("from", f.Name()+".broken"), zap.String("to", f.Name()), zap.Error(err))
				} else {
					plog.Errorf("could not repair %v, failed to copy file", f.Name())
				}
				return false
			}

			if err = f.Truncate(lastOffset); err != nil {
				if lg != nil {
					lg.Warn("failed to truncate", zap.String("path", f.Name()), zap.Error(err))
				} else {
					plog.Errorf("could not repair %v, failed to truncate file", f.Name())
				}
				return false
			}

			if err = fileutil.Fsync(f.File); err != nil {
				if lg != nil {
					lg.Warn("failed to fsync", zap.String("path", f.Name()), zap.Error(err))
				} else {
					plog.Errorf("could not repair %v, failed to sync file", f.Name())
				}
				return false
			}

			if lg != nil {
				lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.ErrUnexpectedEOF))
			}
			return true

		default:
			if lg != nil {
				lg.Warn("failed to repair", zap.String("path", f.Name()), zap.Error(err))
			} else {
				plog.Errorf("could not repair error (%v)", err)
			}
			return false
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/progress.go#L232-L242
func (in *inflights) growBuf() {
	newSize := len(in.buffer) * 2
	if newSize == 0 {
		newSize = 1
	} else if newSize > in.size {
		newSize = in.size
	}
	newBuffer := make([]uint64, newSize)
	copy(newBuffer, in.buffer)
	in.buffer = newBuffer
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L535-L623
func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error {
	// Get the rpc type
	rpcType, err := r.ReadByte()
	if err != nil {
		return err
	}

	// Create the RPC object
	respCh := make(chan RPCResponse, 1)
	rpc := RPC{
		RespChan: respCh,
	}

	// Decode the command
	isHeartbeat := false
	switch rpcType {
	case rpcAppendEntries:
		var req AppendEntriesRequest
		if err := dec.Decode(&req); err != nil {
			return err
		}
		rpc.Command = &req

		// Check if this is a heartbeat
		if req.Term != 0 && req.Leader != nil &&
			req.PrevLogEntry == 0 && req.PrevLogTerm == 0 &&
			len(req.Entries) == 0 && req.LeaderCommitIndex == 0 {
			isHeartbeat = true
		}

	case rpcRequestVote:
		var req RequestVoteRequest
		if err := dec.Decode(&req); err != nil {
			return err
		}
		rpc.Command = &req

	case rpcInstallSnapshot:
		var req InstallSnapshotRequest
		if err := dec.Decode(&req); err != nil {
			return err
		}
		rpc.Command = &req
		rpc.Reader = io.LimitReader(r, req.Size)

	default:
		return fmt.Errorf("unknown rpc type %d", rpcType)
	}

	// Check for heartbeat fast-path
	if isHeartbeat {
		n.heartbeatFnLock.Lock()
		fn := n.heartbeatFn
		n.heartbeatFnLock.Unlock()
		if fn != nil {
			fn(rpc)
			goto RESP
		}
	}

	// Dispatch the RPC
	select {
	case n.consumeCh <- rpc:
	case <-n.shutdownCh:
		return ErrTransportShutdown
	}

	// Wait for response
RESP:
	select {
	case resp := <-respCh:
		// Send the error first
		respErr := ""
		if resp.Error != nil {
			respErr = resp.Error.Error()
		}
		if err := enc.Encode(respErr); err != nil {
			return err
		}

		// Send the response
		if err := enc.Encode(resp.Response); err != nil {
			return err
		}
	case <-n.shutdownCh:
		return ErrTransportShutdown
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L46-L63
func (ivl *Interval) Compare(c Comparable) int {
	ivl2 := c.(*Interval)
	ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin)
	ivbCmpEnd := ivl.Begin.Compare(ivl2.End)
	iveCmpBegin := ivl.End.Compare(ivl2.Begin)

	// ivl is left of ivl2
	if ivbCmpBegin < 0 && iveCmpBegin <= 0 {
		return -1
	}

	// ivl is right of ivl2
	if ivbCmpEnd >= 0 {
		return 1
	}

	return 0
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/util.go#L30-L36
func Exist(dir string) bool {
	names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal"))
	if err != nil {
		return false
	}
	return len(names) != 0
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server_access_control.go#L53-L65
func (ac *AccessController) IsHostWhitelisted(host string) bool {
	ac.hostWhitelistMu.RLock()
	defer ac.hostWhitelistMu.RUnlock()

	if len(ac.HostWhitelist) == 0 { // allow all
		return true
	}

	_, ok := ac.HostWhitelist["*"]
	if ok {
		return true
	}

	_, ok = ac.HostWhitelist[host]
	return ok
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/cluster_util.go#L151-L176
func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
	members := cl.Members()
	vers := make(map[string]*version.Versions)
	for _, m := range members {
		if m.ID == local {
			cv := "not_decided"
			if cl.Version() != nil {
				cv = cl.Version().String()
			}
			vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
			continue
		}
		ver, err := getVersion(lg, m, rt)
		if err != nil {
			if lg != nil {
				lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
			} else {
				plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
			}
			vers[m.ID.String()] = nil
		} else {
			vers[m.ID.String()] = ver
		}
	}
	return vers
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/manifest.go#L116-L118
func openOrCreateManifestFile(dir string, readOnly bool) (ret *manifestFile, result Manifest, err error) {
	return helpOpenOrCreateManifestFile(dir, readOnly, manifestDeletionsRewriteThreshold)
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/observer.go#L87-L91
func (r *Raft) DeregisterObserver(or *Observer) {
	r.observersLock.Lock()
	defer r.observersLock.Unlock()
	delete(r.observers, or.id)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/batch.go#L77-L95
func (wb *WriteBatch) SetEntry(e *Entry) error {
	wb.Lock()
	defer wb.Unlock()
	if err := wb.txn.SetEntry(e); err != ErrTxnTooBig {
		return err
	}
	// Txn has reached its zenith. Commit now.
	if cerr := wb.commit(); cerr != nil {
		return cerr
	}
	// This time the error must not be ErrTxnTooBig, otherwise, we make the
	// error permanent.
	if err := wb.txn.SetEntry(e); err != nil {
		wb.err = err
		return err
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdmain/etcd.go#L301-L312
func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		return nil, nil, err
	}
	osutil.RegisterInterruptHandler(e.Close)
	select {
	case <-e.Server.ReadyNotify(): // wait for e.Server to join the cluster
	case <-e.Server.StopNotify(): // publish aborted from 'ErrStopped'
	}
	return e.Server.StopNotify(), e.Err(), nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L604-L619
func (r *raft) maybeCommit() bool {
	// Preserving matchBuf across calls is an optimization
	// used to avoid allocating a new slice on each call.
	if cap(r.matchBuf) < len(r.prs) {
		r.matchBuf = make(uint64Slice, len(r.prs))
	}
	r.matchBuf = r.matchBuf[:len(r.prs)]
	idx := 0
	for _, p := range r.prs {
		r.matchBuf[idx] = p.Match
		idx++
	}
	sort.Sort(&r.matchBuf)
	mci := r.matchBuf[len(r.matchBuf)-r.quorum()]
	return r.raftLog.maybeCommit(mci, r.Term)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L156-L167
func (m *Member) Defrag() error {
	cli, err := m.CreateEtcdClient()
	if err != nil {
		return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
	}
	defer cli.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	_, err = cli.Defragment(ctx, m.EtcdClientEndpoint)
	cancel()
	return err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/member_command.go#L29-L41
func NewMemberCommand() *cobra.Command {
	mc := &cobra.Command{
		Use:   "member <subcommand>",
		Short: "Membership related commands",
	}

	mc.AddCommand(NewMemberAddCommand())
	mc.AddCommand(NewMemberRemoveCommand())
	mc.AddCommand(NewMemberUpdateCommand())
	mc.AddCommand(NewMemberListCommand())

	return mc
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/backend.go#L56-L65
func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
	snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
	if err != nil {
		return nil, fmt.Errorf("failed to find database snapshot file (%v)", err)
	}
	if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
		return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err)
	}
	return openBackend(cfg), nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_transport.go#L113-L123
func (i *InmemTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
	rpcResp, err := i.makeRPC(target, args, nil, i.timeout)
	if err != nil {
		return err
	}

	// Copy the result back
	out := rpcResp.Response.(*RequestVoteResponse)
	*resp = *out
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/gw/rpc.pb.gw.go#L678-L680
func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3compactor/periodic.go#L178-L184
func (pc *Periodic) getCompactInterval() time.Duration {
	itv := pc.period
	if itv > time.Hour {
		itv = time.Hour
	}
	return itv
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/gw/rpc.pb.gw.go#L1275-L1277
func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn))
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/node.go#L604-L611
func MustSync(st, prevst pb.HardState, entsnum int) bool {
	// Persistent state on all servers:
	// (Updated on stable storage before responding to RPCs)
	// currentTerm
	// votedFor
	// log entries[]
	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/session.go#L38-L72
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
	ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
	for _, opt := range opts {
		opt(ops)
	}

	id := ops.leaseID
	if id == v3.NoLease {
		resp, err := client.Grant(ops.ctx, int64(ops.ttl))
		if err != nil {
			return nil, err
		}
		id = v3.LeaseID(resp.ID)
	}

	ctx, cancel := context.WithCancel(ops.ctx)
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		cancel()
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}

	// keep the lease alive until client error or cancelled context
	go func() {
		defer close(donec)
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}
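A minimal usage sketch, assuming a reachable etcd endpoint on localhost:2379: the session's lease backs a distributed mutex, and the lock is released automatically if the process dies and the lease expires.

cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
if err != nil {
	log.Fatal(err)
}
defer cli.Close()

sess, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
if err != nil {
	log.Fatal(err)
}
defer sess.Close()

mu := concurrency.NewMutex(sess, "/my-lock/")
if err := mu.Lock(context.TODO()); err != nil {
	log.Fatal(err)
}
// ...critical section...
if err := mu.Unlock(context.TODO()); err != nil {
	log.Fatal(err)
}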
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/urlsmap.go#L88-L107
func parse(s string) map[string][]string {
	m := make(map[string][]string)
	for s != "" {
		key := s
		if i := strings.IndexAny(key, ","); i >= 0 {
			key, s = key[:i], key[i+1:]
		} else {
			s = ""
		}
		if key == "" {
			continue
		}
		value := ""
		if i := strings.Index(key, "="); i >= 0 {
			key, value = key[:i], key[i+1:]
		}
		m[key] = append(m[key], value)
	}
	return m
}
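A quick worked example of the format this parser accepts (comma-separated name=URL pairs, where a repeated name accumulates values):

m := parse("infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra0=http://10.0.1.12:2380")
// m["infra0"] == []string{"http://10.0.1.10:2380", "http://10.0.1.12:2380"}
// m["infra1"] == []string{"http://10.0.1.11:2380"}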
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/stream.go#L132-L149
func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
	w := &streamWriter{
		lg:      lg,
		localID: local,
		peerID:  id,
		status:  status,
		fs:      fs,
		r:       r,
		msgc:    make(chan raftpb.Message, streamBufSize),
		connc:   make(chan *outgoingConn),
		stopc:   make(chan struct{}),
		done:    make(chan struct{}),
	}
	go w.run()
	return w
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3compactor/revision.go#L64-L124
func (rc *Revision) Run() {
	prev := int64(0)
	go func() {
		for {
			select {
			case <-rc.ctx.Done():
				return
			case <-rc.clock.After(revInterval):
				rc.mu.Lock()
				p := rc.paused
				rc.mu.Unlock()
				if p {
					continue
				}
			}

			rev := rc.rg.Rev() - rc.retention
			if rev <= 0 || rev == prev {
				continue
			}

			now := time.Now()
			if rc.lg != nil {
				rc.lg.Info(
					"starting auto revision compaction",
					zap.Int64("revision", rev),
					zap.Int64("revision-compaction-retention", rc.retention),
				)
			} else {
				plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, rc.retention)
			}
			_, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev})
			if err == nil || err == mvcc.ErrCompacted {
				prev = rev
				if rc.lg != nil {
					rc.lg.Info(
						"completed auto revision compaction",
						zap.Int64("revision", rev),
						zap.Int64("revision-compaction-retention", rc.retention),
						zap.Duration("took", time.Since(now)),
					)
				} else {
					plog.Noticef("Finished auto-compaction at revision %d", rev)
				}
			} else {
				if rc.lg != nil {
					rc.lg.Warn(
						"failed auto revision compaction",
						zap.Int64("revision", rev),
						zap.Int64("revision-compaction-retention", rc.retention),
						zap.Duration("retry-interval", revInterval),
						zap.Error(err),
					)
				} else {
					plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
					plog.Noticef("Retry after %v", revInterval)
				}
			}
		}
	}()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/watcher.go#L55-L118
func (w *watcher) send(wr clientv3.WatchResponse) {
	if wr.IsProgressNotify() && !w.progress {
		return
	}
	if w.nextrev > wr.Header.Revision && len(wr.Events) > 0 {
		return
	}
	if w.nextrev == 0 {
		// current watch; expect updates following this revision
		w.nextrev = wr.Header.Revision + 1
	}

	events := make([]*mvccpb.Event, 0, len(wr.Events))

	var lastRev int64
	for i := range wr.Events {
		ev := (*mvccpb.Event)(wr.Events[i])
		if ev.Kv.ModRevision < w.nextrev {
			continue
		} else {
			// We cannot update w.rev here.
			// txn can have multiple events with the same rev.
			// If w.nextrev updates here, it would skip events in the same txn.
			lastRev = ev.Kv.ModRevision
		}

		filtered := false
		for _, filter := range w.filters {
			if filter(*ev) {
				filtered = true
				break
			}
		}
		if filtered {
			continue
		}

		if !w.prevKV {
			evCopy := *ev
			evCopy.PrevKv = nil
			ev = &evCopy
		}
		events = append(events, ev)
	}

	if lastRev >= w.nextrev {
		w.nextrev = lastRev + 1
	}

	// all events are filtered out?
	if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 {
		return
	}

	w.lastHeader = wr.Header
	w.post(&pb.WatchResponse{
		Header:          &wr.Header,
		Created:         wr.Created,
		CompactRevision: wr.CompactRevision,
		Canceled:        wr.Canceled,
		WatchId:         w.id,
		Events:          events,
	})
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L90-L100
func (s *Skiplist) DecrRef() {
	newRef := atomic.AddInt32(&s.ref, -1)
	if newRef > 0 {
		return
	}

	s.arena.reset()
	// Indicate we are closed. Good for testing. Also, lets GC reclaim memory. Race condition
	// here would suggest we are accessing skiplist when we are supposed to have no reference!
	s.arena = nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/netutil/netutil.go#L173-L200
func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string) (bool, error) {
	if len(a) != len(b) {
		return false, fmt.Errorf("len(%q) != len(%q)", a, b)
	}
	urlsA := make([]url.URL, 0)
	for _, str := range a {
		u, err := url.Parse(str)
		if err != nil {
			return false, fmt.Errorf("failed to parse %q", str)
		}
		urlsA = append(urlsA, *u)
	}
	urlsB := make([]url.URL, 0)
	for _, str := range b {
		u, err := url.Parse(str)
		if err != nil {
			return false, fmt.Errorf("failed to parse %q", str)
		}
		urlsB = append(urlsB, *u)
	}

	if lg == nil {
		lg, _ = zap.NewProduction()
		if lg == nil {
			lg = zap.NewExample()
		}
	}
	return urlsEqual(ctx, lg, urlsA, urlsB)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/merge.go#L155-L167
func (op *MergeOperator) Get() ([]byte, error) {
	op.RLock()
	defer op.RUnlock()
	var existing []byte
	err := op.db.View(func(txn *Txn) (err error) {
		existing, err = op.iterateAndMerge(txn)
		return err
	})
	if err == errNoMerge {
		return existing, nil
	}
	return existing, err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/raftexample/raft.go#L200-L224
func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot) *wal.WAL {
	if !wal.Exist(rc.waldir) {
		if err := os.Mkdir(rc.waldir, 0750); err != nil {
			log.Fatalf("raftexample: cannot create dir for wal (%v)", err)
		}

		w, err := wal.Create(zap.NewExample(), rc.waldir, nil)
		if err != nil {
			log.Fatalf("raftexample: create wal error (%v)", err)
		}
		w.Close()
	}

	walsnap := walpb.Snapshot{}
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	log.Printf("loading WAL at term %d and index %d", walsnap.Term, walsnap.Index)
	w, err := wal.Open(zap.NewExample(), rc.waldir, walsnap)
	if err != nil {
		log.Fatalf("raftexample: error loading wal (%v)", err)
	}

	return w
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L430-L433
func (s *Iterator) Next() {
	y.AssertTrue(s.Valid())
	s.n = s.list.getNext(s.n, 0)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/structs.go#L99-L127
func encodeEntry(e *Entry, buf *bytes.Buffer) (int, error) {
	h := header{
		klen:      uint32(len(e.Key)),
		vlen:      uint32(len(e.Value)),
		expiresAt: e.ExpiresAt,
		meta:      e.meta,
		userMeta:  e.UserMeta,
	}

	var headerEnc [headerBufSize]byte
	h.Encode(headerEnc[:])

	hash := crc32.New(y.CastagnoliCrcTable)

	buf.Write(headerEnc[:])
	hash.Write(headerEnc[:])

	buf.Write(e.Key)
	hash.Write(e.Key)

	buf.Write(e.Value)
	hash.Write(e.Value)

	var crcBuf [crc32.Size]byte
	binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32())
	buf.Write(crcBuf[:])

	return len(headerEnc) + len(e.Key) + len(e.Value) + len(crcBuf), nil
}
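The same checksum-framing idea, reduced to a self-contained sketch using only the standard library (the header/key/value layout above is badger-specific; this only shows payload-plus-big-endian-CRC32-Castagnoli framing):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// frame appends a big-endian CRC32 (Castagnoli) over the payload, mirroring
// how encodeEntry checksums header, key, and value together.
func frame(payload []byte) []byte {
	var buf bytes.Buffer
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	buf.Write(payload)
	h.Write(payload)
	var crcBuf [crc32.Size]byte
	binary.BigEndian.PutUint32(crcBuf[:], h.Sum32())
	buf.Write(crcBuf[:])
	return buf.Bytes()
}

func main() {
	rec := frame([]byte("key=value"))
	payload, sum := rec[:len(rec)-crc32.Size], binary.BigEndian.Uint32(rec[len(rec)-crc32.Size:])
	fmt.Println(crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli)) == sum) // true
}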
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/progress.go#L166-L177
func (pr *Progress) IsPaused() bool {
	switch pr.State {
	case ProgressStateProbe:
		return pr.Paused
	case ProgressStateReplicate:
		return pr.ins.full()
	case ProgressStateSnapshot:
		return true
	default:
		panic("unexpected state")
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/quota.go#L74-L135
func NewBackendQuota(s *EtcdServer, name string) Quota {
	lg := s.getLogger()
	quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes))

	if s.Cfg.QuotaBackendBytes < 0 {
		// disable quotas if negative
		quotaLogOnce.Do(func() {
			if lg != nil {
				lg.Info(
					"disabled backend quota",
					zap.String("quota-name", name),
					zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
				)
			} else {
				plog.Warningf("disabling backend quota")
			}
		})
		return &passthroughQuota{}
	}

	if s.Cfg.QuotaBackendBytes == 0 {
		// use default size if no quota size given
		quotaLogOnce.Do(func() {
			if lg != nil {
				lg.Info(
					"enabled backend quota with default value",
					zap.String("quota-name", name),
					zap.Int64("quota-size-bytes", DefaultQuotaBytes),
					zap.String("quota-size", DefaultQuotaSize),
				)
			}
		})
		quotaBackendBytes.Set(float64(DefaultQuotaBytes))
		return &backendQuota{s, DefaultQuotaBytes}
	}

	quotaLogOnce.Do(func() {
		if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
			if lg != nil {
				lg.Warn(
					"quota exceeds the maximum value",
					zap.String("quota-name", name),
					zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
					zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
					zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes),
					zap.String("quota-maximum-size", maxQuotaSize),
				)
			} else {
				plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
			}
		}
		if lg != nil {
			lg.Info(
				"enabled backend quota",
				zap.String("quota-name", name),
				zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
				zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
			)
		}
	})
	return &backendQuota{s, s.Cfg.QuotaBackendBytes}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/txn_command.go#L34-L42
func NewTxnCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "txn [options]",
		Short: "Txn processes all the requests in one transaction",
		Run:   txnCommandFunc,
	}
	cmd.Flags().BoolVarP(&txnInteractive, "interactive", "i", false, "Input transaction in interactive mode")
	return cmd
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/netutil/routes_linux.go#L227-L250
func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
	var attrs []syscall.NetlinkRouteAttr
	attrs, err = syscall.ParseNetlinkRouteAttr(m)
	if err != nil {
		return "", 0, err
	}

	for _, attr := range attrs {
		if attr.Attr.Type == syscall.RTA_PREFSRC {
			host = net.IP(attr.Value).String()
		}
		if attr.Attr.Type == syscall.RTA_OIF {
			oif = cpuutil.ByteOrder().Uint32(attr.Value)
		}
		if host != "" && oif != uint32(0) {
			break
		}
	}

	if oif == 0 {
		err = errNoDefaultRoute
	}
	return host, oif, err
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_transport.go#L100-L110
func (i *InmemTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
	rpcResp, err := i.makeRPC(target, args, nil, i.timeout)
	if err != nil {
		return err
	}

	// Copy the result back
	out := rpcResp.Response.(*AppendEntriesResponse)
	*resp = *out
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L184-L190
func addRemoteFromRequest(tr Transporter, r *http.Request) {
	if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil {
		if urls := r.Header.Get("X-PeerURLs"); urls != "" {
			tr.AddRemote(from, strings.Split(urls, ","))
		}
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L128-L134
func CompareKeys(key1, key2 []byte) int {
	AssertTrue(len(key1) > 8 && len(key2) > 8)
	if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 {
		return cmp
	}
	return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:])
}
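For context, badger's internal keys carry an 8-byte version (timestamp) suffix, which is why the comparison above splits each key: user key first, suffix only on ties. A hypothetical illustration, assuming CompareKeys is in scope:

k1 := append([]byte("apple"), make([]byte, 8)...)  // "apple" + zero version suffix
k2 := append([]byte("banana"), make([]byte, 8)...) // "banana" + zero version suffix
fmt.Println(CompareKeys(k1, k2)) // negative: "apple" sorts before "banana"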
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L50-L52
func newLog(storage Storage, logger Logger) *raftLog {
	return newLogWithSize(storage, logger, noLimit)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/connectivity.go#L36-L58
func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
	// Update counters.
	for idx, state := range []connectivity.State{oldState, newState} {
		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
		switch state {
		case connectivity.Ready:
			cse.numReady += updateVal
		case connectivity.Connecting:
			cse.numConnecting += updateVal
		case connectivity.TransientFailure:
			cse.numTransientFailure += updateVal
		}
	}

	// Evaluate.
	if cse.numReady > 0 {
		return connectivity.Ready
	}
	if cse.numConnecting > 0 {
		return connectivity.Connecting
	}
	return connectivity.TransientFailure
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/logutil/package_logger.go#L32-L34
func NewPackageLogger(repo, pkg string) Logger {
	return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/apply_v2.go#L116-L134
func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
	defer warnOfExpensiveRequest(s.getLogger(), time.Now(), r, nil, nil)

	switch r.Method {
	case "POST":
		return s.applyV2.Post(r)
	case "PUT":
		return s.applyV2.Put(r)
	case "DELETE":
		return s.applyV2.Delete(r)
	case "QGET":
		return s.applyV2.QGet(r)
	case "SYNC":
		return s.applyV2.Sync(r)
	default:
		// This should never be reached, but just in case:
		return Response{Err: ErrUnknownMethod}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/gw/rpc.pb.gw.go#L966-L968
func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn))
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_store.go#L58-L60
func (i *InmemStore) StoreLog(log *Log) error {
	return i.StoreLogs([]*Log{log})
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L273-L275
func (rn *RawNode) ReportUnreachable(id uint64) {
	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L453-L455
func (s *Iterator) SeekToFirst() {
	s.n = s.list.getNext(s.list.head, 0)
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/observer.go#L80-L84
func (r *Raft) RegisterObserver(or *Observer) {
	r.observersLock.Lock()
	defer r.observersLock.Unlock()
	r.observers[or.id] = or
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/raftexample/raft.go#L227-L250
func (rc *raftNode) replayWAL() *wal.WAL {
	log.Printf("replaying WAL of member %d", rc.id)
	snapshot := rc.loadSnapshot()
	w := rc.openWAL(snapshot)
	_, st, ents, err := w.ReadAll()
	if err != nil {
		log.Fatalf("raftexample: failed to read WAL (%v)", err)
	}
	rc.raftStorage = raft.NewMemoryStorage()
	if snapshot != nil {
		rc.raftStorage.ApplySnapshot(*snapshot)
	}
	rc.raftStorage.SetHardState(st)

	// append to storage so raft starts at the right place in log
	rc.raftStorage.Append(ents)
	// send nil once lastIndex is published so client knows commit channel is current
	if len(ents) > 0 {
		rc.lastIndex = ents[len(ents)-1].Index
	} else {
		rc.commitC <- nil
	}
	return w
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/raftexample/raft.go#L81-L108
func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error), proposeC <-chan string,
	confChangeC <-chan raftpb.ConfChange) (<-chan *string, <-chan error, <-chan *snap.Snapshotter) {

	commitC := make(chan *string)
	errorC := make(chan error)

	rc := &raftNode{
		proposeC:    proposeC,
		confChangeC: confChangeC,
		commitC:     commitC,
		errorC:      errorC,
		id:          id,
		peers:       peers,
		join:        join,
		waldir:      fmt.Sprintf("raftexample-%d", id),
		snapdir:     fmt.Sprintf("raftexample-%d-snap", id),
		getSnapshot: getSnapshot,
		snapCount:   defaultSnapshotCount,
		stopc:       make(chan struct{}),
		httpstopc:   make(chan struct{}),
		httpdonec:   make(chan struct{}),

		snapshotterReady: make(chan *snap.Snapshotter, 1),
		// rest of structure populated after WAL replay
	}
	go rc.startRaft()
	return commitC, errorC, rc.snapshotterReady
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/storage.go#L128-L139
func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
	ms.Lock()
	defer ms.Unlock()
	offset := ms.ents[0].Index
	if i < offset {
		return 0, ErrCompacted
	}
	if int(i-offset) >= len(ms.ents) {
		return 0, ErrUnavailable
	}
	return ms.ents[i-offset].Term, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/config_logging.go#L36-L41
func (cfg Config) GetLogger() *zap.Logger {
	cfg.loggerMu.RLock()
	l := cfg.logger
	cfg.loggerMu.RUnlock()
	return l
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L1779-L1793
func (s *EtcdServer) sync(timeout time.Duration) {
	req := pb.Request{
		Method: "SYNC",
		ID:     s.reqIDGen.Next(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no promise that the node has a leader when doing a SYNC
	// request, so propose from a goroutine.
	ctx, cancel := context.WithTimeout(s.ctx, timeout)
	s.goAttach(func() {
		s.r.Propose(ctx, data)
		cancel()
	})
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/watcher_hub.go#L48-L53
func newWatchHub(capacity int) *watcherHub {
	return &watcherHub{
		watchers:     make(map[string]*list.List),
		EventHistory: newEventHistory(capacity),
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/resolver/endpoint/endpoint.go#L82-L90
func (e *ResolverGroup) SetEndpoints(endpoints []string) {
	addrs := epsToAddrs(endpoints...)
	e.mu.Lock()
	e.endpoints = endpoints
	for _, r := range e.resolvers {
		r.cc.NewAddress(addrs)
	}
	e.mu.Unlock()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/apply.go#L438-L462
func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
	// TODO: possible optimizations
	// * chunk reads for large ranges to conserve memory
	// * rewrite rules for common patterns:
	//   ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
	// * caching
	rr, err := rv.Range(c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
	if err != nil {
		return false
	}
	if len(rr.KVs) == 0 {
		if c.Target == pb.Compare_VALUE {
			// Always fail if comparing a value on a key/keys that doesn't exist;
			// nil == empty string in grpc; no way to represent missing value
			return false
		}
		return compareKV(c, mvccpb.KeyValue{})
	}
	for _, kv := range rr.KVs {
		if !compareKV(c, kv) {
			return false
		}
	}
	return true
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/op.go#L231-L261
func OpDelete(key string, opts ...OpOption) Op {
	// WithPrefix and WithFromKey are not supported together
	if isWithPrefix(opts) && isWithFromKey(opts) {
		panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
	}
	ret := Op{t: tDeleteRange, key: []byte(key)}
	ret.applyOpts(opts)
	switch {
	case ret.leaseID != 0:
		panic("unexpected lease in delete")
	case ret.limit != 0:
		panic("unexpected limit in delete")
	case ret.rev != 0:
		panic("unexpected revision in delete")
	case ret.sort != nil:
		panic("unexpected sort in delete")
	case ret.serializable:
		panic("unexpected serializable in delete")
	case ret.countOnly:
		panic("unexpected countOnly in delete")
	case ret.minModRev != 0, ret.maxModRev != 0:
		panic("unexpected mod revision filter in delete")
	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
		panic("unexpected create revision filter in delete")
	case ret.filterDelete, ret.filterPut:
		panic("unexpected filter in delete")
	case ret.createdNotify:
		panic("unexpected createdNotify in delete")
	}
	return ret
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/future.go#L177-L188
func (u *userSnapshotFuture) Open() (*SnapshotMeta, io.ReadCloser, error) {
	if u.opener == nil {
		return nil, nil, fmt.Errorf("no snapshot available")
	} else {
		// Invalidate the opener so it can't get called multiple times,
		// which isn't generally safe.
		defer func() {
			u.opener = nil
		}()
		return u.opener()
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L319-L361
func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error) {
	if err = os.RemoveAll(m.EtcdOnSnapshotRestore.DataDir); err != nil {
		return err
	}
	if err = os.RemoveAll(m.EtcdOnSnapshotRestore.WALDir); err != nil {
		return err
	}

	lg.Info(
		"snapshot restore START",
		zap.String("member-name", m.Etcd.Name),
		zap.Strings("member-client-urls", m.Etcd.AdvertiseClientURLs),
		zap.String("snapshot-path", m.SnapshotPath),
	)
	now := time.Now()
	mgr := snapshot.NewV3(lg)
	err = mgr.Restore(snapshot.RestoreConfig{
		SnapshotPath:        m.SnapshotInfo.SnapshotPath,
		Name:                m.EtcdOnSnapshotRestore.Name,
		OutputDataDir:       m.EtcdOnSnapshotRestore.DataDir,
		OutputWALDir:        m.EtcdOnSnapshotRestore.WALDir,
		PeerURLs:            m.EtcdOnSnapshotRestore.AdvertisePeerURLs,
		InitialCluster:      m.EtcdOnSnapshotRestore.InitialCluster,
		InitialClusterToken: m.EtcdOnSnapshotRestore.InitialClusterToken,
		SkipHashCheck:       false,
		// TODO: set SkipHashCheck to true, to recover from an existing db file
	})
	took := time.Since(now)
	lg.Info(
		"snapshot restore END",
		zap.String("member-name", m.SnapshotInfo.MemberName),
		zap.Strings("member-client-urls", m.SnapshotInfo.MemberClientURLs),
		zap.String("snapshot-path", m.SnapshotPath),
		zap.String("snapshot-file-size", m.SnapshotInfo.SnapshotFileSize),
		zap.String("snapshot-total-size", m.SnapshotInfo.SnapshotTotalSize),
		zap.Int64("snapshot-total-key", m.SnapshotInfo.SnapshotTotalKey),
		zap.Int64("snapshot-hash", m.SnapshotInfo.SnapshotHash),
		zap.Int64("snapshot-revision", m.SnapshotInfo.SnapshotRevision),
		zap.String("took", took.String()),
		zap.Error(err),
	)
	return err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/put_command.go#L67-L77
func putCommandFunc(cmd *cobra.Command, args []string) {
	key, value, opts := getPutOp(args)

	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).Put(ctx, key, value, opts...)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	display.Put(*resp)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/watchable_store.go#L409-L433
func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
	for i, v := range vals {
		var kv mvccpb.KeyValue
		if err := kv.Unmarshal(v); err != nil {
			if lg != nil {
				lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
			} else {
				plog.Panicf("cannot unmarshal event: %v", err)
			}
		}

		if !wg.contains(string(kv.Key)) {
			continue
		}

		ty := mvccpb.PUT
		if isTombstone(revs[i]) {
			ty = mvccpb.DELETE
			// patch in mod revision so watchers won't skip
			kv.ModRevision = bytesToRev(revs[i]).main
		}
		evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
	}
	return evs
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/file_pipeline.go#L58-L64
func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
	select {
	case f = <-fp.filec:
	case err = <-fp.errc:
	}
	return f, err
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/level_handler.go#L78-L101
func (s *levelHandler) deleteTables(toDel []*table.Table) error {
	s.Lock() // s.Unlock() below

	toDelMap := make(map[uint64]struct{})
	for _, t := range toDel {
		toDelMap[t.ID()] = struct{}{}
	}

	// Make a copy as iterators might be keeping a slice of tables.
	var newTables []*table.Table
	for _, t := range s.tables {
		_, found := toDelMap[t.ID()]
		if !found {
			newTables = append(newTables, t)
			continue
		}
		s.totalSize -= t.Size()
	}
	s.tables = newTables

	s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow.

	return decrRefs(toDel)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/strings.go#L28-L31
func (ss *StringsValue) Set(s string) error {
	*ss = strings.Split(s, ",")
	return nil
}
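A minimal sketch of wiring such a flag.Value into the standard flag package (the StringsValue type above, including its String method, is assumed in scope; the flag name is hypothetical):

var endpoints StringsValue
flag.Var(&endpoints, "endpoints", "comma-separated list of endpoints")
flag.Parse()
// --endpoints=a:2379,b:2379 stores []string{"a:2379", "b:2379"} in endpoints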
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L111-L128
func (itr *blockIterator) parseKV(h header) {
	if cap(itr.key) < int(h.plen+h.klen) {
		sz := int(h.plen) + int(h.klen) // Convert to int before adding to avoid uint16 overflow.
		itr.key = make([]byte, 2*sz)
	}
	itr.key = itr.key[:h.plen+h.klen]
	copy(itr.key, itr.baseKey[:h.plen])
	copy(itr.key[h.plen:], itr.data[itr.pos:itr.pos+uint32(h.klen)])
	itr.pos += uint32(h.klen)

	if itr.pos+uint32(h.vlen) > uint32(len(itr.data)) {
		itr.err = errors.Errorf("Value exceeded size of block: %d %d %d %d %v",
			itr.pos, h.klen, h.vlen, len(itr.data), h)
		return
	}
	itr.val = y.SafeCopy(itr.val, itr.data[itr.pos:itr.pos+uint32(h.vlen)])
	itr.pos += uint32(h.vlen)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/lease.go#L494-L533
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[karesp.ID]
	if !ok {
		return
	}

	if karesp.TTL <= 0 {
		// lease expired; close all keep alive channels
		delete(l.keepAlives, karesp.ID)
		ka.close()
		return
	}

	// send update to all channels
	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
	for _, ch := range ka.chs {
		select {
		case ch <- karesp:
		default:
			if l.lg != nil {
				l.lg.Warn("lease keepalive response queue is full; dropping response send",
					zap.Int("queue-size", len(ch)),
					zap.Int("queue-capacity", cap(ch)),
				)
			}
		}
		// still advance in order to rate-limit keep-alive sends
		ka.nextKeepAlive = nextKeepAlive
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/value.go#L1024-L1041
func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
	// Check for valid offset if we are reading to writable log.
	maxFid := atomic.LoadUint32(&vlog.maxFid)
	if vp.Fid == maxFid && vp.Offset >= vlog.woffset() {
		return nil, nil, errors.Errorf(
			"Invalid value pointer offset: %d greater than current offset: %d",
			vp.Offset, vlog.woffset())
	}

	buf, cb, err := vlog.readValueBytes(vp, s)
	if err != nil {
		return nil, cb, err
	}
	var h header
	h.Decode(buf)
	n := uint32(headerBufSize) + h.klen
	return buf[n : n+h.vlen], cb, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L1206-L1227
func NewClusterV3(t testing.TB, cfg *ClusterConfig) *ClusterV3 {
	cfg.UseGRPC = true
	if os.Getenv("CLIENT_DEBUG") != "" {
		clientv3.SetLogger(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4))
	}
	clus := &ClusterV3{
		cluster: NewClusterByConfig(t, cfg),
	}
	clus.Launch(t)

	if !cfg.SkipCreatingClient {
		for _, m := range clus.Members {
			client, err := NewClientV3(m)
			if err != nil {
				t.Fatalf("cannot create client: %v", err)
			}
			clus.clients = append(clus.clients, client)
		}
	}

	return clus
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/levels.go#L388-L417
func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
	// This function must use identical criteria for guaranteeing compaction's progress that
	// addLevel0Table uses.

	// cstatus is checked to see if level 0's tables are already being compacted
	if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
		pri := compactionPriority{
			level: 0,
			score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
		}
		prios = append(prios, pri)
	}

	for i, l := range s.levels[1:] {
		// Don't consider those tables that are already being compacted right now.
		delSize := s.cstatus.delSize(i + 1)

		if l.isCompactable(delSize) {
			pri := compactionPriority{
				level: i + 1,
				score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
			}
			prios = append(prios, pri)
		}
	}
	sort.Slice(prios, func(i, j int) bool {
		return prios[i].score > prios[j].score
	})
	return prios
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L666-L676
func (r *Raft) VerifyLeader() Future {
	metrics.IncrCounter([]string{"raft", "verify_leader"}, 1)
	verifyFuture := &verifyFuture{}
	verifyFuture.init()
	select {
	case <-r.shutdownCh:
		return errorFuture{ErrRaftShutdown}
	case r.verifyCh <- verifyFuture:
		return verifyFuture
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/membership/cluster.go#L358-L392
func (c *RaftCluster) RemoveMember(id types.ID) {
	c.Lock()
	defer c.Unlock()
	if c.v2store != nil {
		mustDeleteMemberFromStore(c.v2store, id)
	}
	if c.be != nil {
		mustDeleteMemberFromBackend(c.be, id)
	}

	m, ok := c.members[id]
	delete(c.members, id)
	c.removed[id] = true

	if c.lg != nil {
		if ok {
			c.lg.Info(
				"removed member",
				zap.String("cluster-id", c.cid.String()),
				zap.String("local-member-id", c.localID.String()),
				zap.String("removed-remote-peer-id", id.String()),
				zap.Strings("removed-remote-peer-urls", m.PeerURLs),
			)
		} else {
			c.lg.Warn(
				"skipped removing already removed member",
				zap.String("cluster-id", c.cid.String()),
				zap.String("local-member-id", c.localID.String()),
				zap.String("removed-remote-peer-id", id.String()),
			)
		}
	} else {
		plog.Infof("removed member %s from cluster %s", id, c.cid)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/timeout_transport.go#L27-L51
func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
	tr, err := NewTransport(info, dialtimeoutd)
	if err != nil {
		return nil, err
	}

	if rdtimeoutd != 0 || wtimeoutd != 0 {
		// the timed out connection will timeout soon after it is idle.
		// it should not be put back to http transport as an idle connection for future usage.
		tr.MaxIdleConnsPerHost = -1
	} else {
		// allow more idle connections between peers to avoid unnecessary port allocation.
		tr.MaxIdleConnsPerHost = 1024
	}

	tr.Dial = (&rwTimeoutDialer{
		Dialer: net.Dialer{
			Timeout:   dialtimeoutd,
			KeepAlive: 30 * time.Second,
		},
		rdtimeoutd: rdtimeoutd,
		wtimeoutd:  wtimeoutd,
	}).Dial
	return tr, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L353-L385
func HasExistingState(logs LogStore, stable StableStore, snaps SnapshotStore) (bool, error) {
	// Make sure we don't have a current term.
	currentTerm, err := stable.GetUint64(keyCurrentTerm)
	if err == nil {
		if currentTerm > 0 {
			return true, nil
		}
	} else {
		if err.Error() != "not found" {
			return false, fmt.Errorf("failed to read current term: %v", err)
		}
	}

	// Make sure we have an empty log.
	lastIndex, err := logs.LastIndex()
	if err != nil {
		return false, fmt.Errorf("failed to get last log index: %v", err)
	}
	if lastIndex > 0 {
		return true, nil
	}

	// Make sure we have no snapshots
	snapshots, err := snaps.List()
	if err != nil {
		return false, fmt.Errorf("failed to list snapshots: %v", err)
	}
	if len(snapshots) > 0 {
		return true, nil
	}

	return false, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/raftexample/httpapi.go#L105-L123
func serveHttpKVAPI(kv *kvstore, port int, confChangeC chan<- raftpb.ConfChange, errorC <-chan error) {
	srv := http.Server{
		Addr: ":" + strconv.Itoa(port),
		Handler: &httpKVAPI{
			store:       kv,
			confChangeC: confChangeC,
		},
	}
	go func() {
		if err := srv.ListenAndServe(); err != nil {
			log.Fatal(err)
		}
	}()

	// exit when raft goes down
	if err, ok := <-errorC; ok {
		log.Fatal(err)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/lease/leasehttp/http.go#L193-L242
func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
	// will post lreq protobuf to leader
	lreq, err := (&leasepb.LeaseInternalRequest{
		LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{
			ID:   int64(id),
			Keys: keys,
		},
	}).Marshal()
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/protobuf")
	req = req.WithContext(ctx)

	cc := &http.Client{Transport: rt}
	var b []byte
	// buffer errc channel so that errc doesn't block inside the goroutine
	resp, err := cc.Do(req)
	if err != nil {
		return nil, err
	}
	b, err = readResponse(resp)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == http.StatusRequestTimeout {
		return nil, ErrLeaseHTTPTimeout
	}
	if resp.StatusCode == http.StatusNotFound {
		return nil, lease.ErrLeaseNotFound
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("lease: unknown error(%s)", string(b))
	}

	lresp := &leasepb.LeaseInternalResponse{}
	if err := lresp.Unmarshal(b); err != nil {
		return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
	}
	if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
		return nil, fmt.Errorf("lease: renew id mismatch")
	}
	return lresp, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L306-L344
func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
	err := l.mustCheckOutOfBounds(lo, hi)
	if err != nil {
		return nil, err
	}
	if lo == hi {
		return nil, nil
	}
	var ents []pb.Entry
	if lo < l.unstable.offset {
		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
		if err == ErrCompacted {
			return nil, err
		} else if err == ErrUnavailable {
			l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
		} else if err != nil {
			panic(err) // TODO(bdarnell)
		}

		// check if ents has reached the size limitation
		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
			return storedEnts, nil
		}

		ents = storedEnts
	}
	if hi > l.unstable.offset {
		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
		if len(ents) > 0 {
			combined := make([]pb.Entry, len(ents)+len(unstable))
			n := copy(combined, ents)
			copy(combined[n:], unstable)
			ents = combined
		} else {
			ents = unstable
		}
	}
	return limitSize(ents, maxSize), nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/badger/cmd/bank.go#L181-L210
func seekTotal(txn *badger.Txn) ([]account, error) {
	expected := uint64(numAccounts) * uint64(initialBal)
	var accounts []account
	var total uint64
	for i := 0; i < numAccounts; i++ {
		item, err := txn.Get(key(i))
		if err != nil {
			log.Printf("Error for account: %d. err=%v. key=%q\n", i, err, key(i))
			return accounts, err
		}
		val, err := item.ValueCopy(nil)
		if err != nil {
			return accounts, err
		}
		acc := account{
			Id:  i,
			Bal: toUint64(val),
		}
		accounts = append(accounts, acc)
		total += acc.Bal
	}
	if total != expected {
		log.Printf("Balance did NOT match up. Expected: %d. Received: %d",
			expected, total)
		atomic.AddInt32(&stopAll, 1)
		return accounts, errFailure
	}
	return accounts, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/user_command.go#L216-L235
func userChangePasswordCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("user passwd command requires user name as its argument"))
	}

	var password string

	if !passwordInteractive {
		fmt.Scanf("%s", &password)
	} else {
		password = readPasswordInteractive(args[0])
	}

	resp, err := mustClientFromCmd(cmd).Auth.UserChangePassword(context.TODO(), args[0], password)
	if err != nil {
		ExitWithError(ExitError, err)
	}

	display.UserChangePassword(*resp)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2http/client.go#L740-L745
func getUint64(form url.Values, key string) (i uint64, err error) {
	if vals, ok := form[key]; ok {
		i, err = strconv.ParseUint(vals[0], 10, 64)
	}
	return
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log_unstable.go#L96-L109
func (u *unstable) shrinkEntriesArray() {
	// We replace the array if we're using less than half of the space in
	// it. This number is fairly arbitrary, chosen as an attempt to balance
	// memory usage vs number of allocations. It could probably be improved
	// with some focused tuning.
	const lenMultiple = 2
	if len(u.entries) == 0 {
		u.entries = nil
	} else if len(u.entries)*lenMultiple < cap(u.entries) {
		newEntries := make([]pb.Entry, len(u.entries))
		copy(newEntries, u.entries)
		u.entries = newEntries
	}
}
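The shrink condition generalizes to any slice; a small self-contained illustration of the same rule on []int (all names here are hypothetical):

package main

import "fmt"

// shrink mirrors shrinkEntriesArray's rule: reallocate when less than half
// of the backing array is still in use, so the old array can be collected.
func shrink(s []int) []int {
	const lenMultiple = 2
	switch {
	case len(s) == 0:
		return nil
	case len(s)*lenMultiple < cap(s):
		out := make([]int, len(s))
		copy(out, s)
		return out
	default:
		return s
	}
}

func main() {
	s := make([]int, 100, 1024)
	s = shrink(s)
	fmt.Println(len(s), cap(s)) // 100 100
}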
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/histogram.go#L90-L114
func (histogram *histogramData) Update(value int64) {
	if value > histogram.max {
		histogram.max = value
	}
	if value < histogram.min {
		histogram.min = value
	}

	histogram.sum += value
	histogram.totalCount++

	for index := 0; index <= len(histogram.bins); index++ {
		// Allocate value in the last buckets if we reached the end of the Bounds array.
		if index == len(histogram.bins) {
			histogram.countPerBin[index]++
			break
		}

		// Check if the value should be added to the "index" bin
		if value < int64(histogram.bins[index]) {
			histogram.countPerBin[index]++
			break
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L1552-L1570
func (r *raft) reduceUncommittedSize(ents []pb.Entry) {
	if r.uncommittedSize == 0 {
		// Fast-path for followers, who do not track or enforce the limit.
		return
	}

	var s uint64
	for _, e := range ents {
		s += uint64(PayloadSize(e))
	}
	if s > r.uncommittedSize {
		// uncommittedSize may underestimate the size of the uncommitted Raft
		// log tail but will never overestimate it. Saturate at 0 instead of
		// allowing overflow.
		r.uncommittedSize = 0
	} else {
		r.uncommittedSize -= s
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/v2_server.go#L141-L160
func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST":
		return v2api.Post(ctx, r)
	case "PUT":
		return v2api.Put(ctx, r)
	case "DELETE":
		return v2api.Delete(ctx, r)
	case "QGET":
		return v2api.QGet(ctx, r)
	case "GET":
		return v2api.Get(ctx, r)
	case "HEAD":
		return v2api.Head(ctx, r)
	}
	return Response{}, ErrUnknownMethod
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/listener.go#L237-L321
func (info TLSInfo) baseConfig() (*tls.Config, error) {
	if info.KeyFile == "" || info.CertFile == "" {
		return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
	}
	if info.Logger == nil {
		info.Logger = zap.NewNop()
	}

	_, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
	if err != nil {
		return nil, err
	}

	cfg := &tls.Config{
		MinVersion: tls.VersionTLS12,
		ServerName: info.ServerName,
	}

	if len(info.CipherSuites) > 0 {
		cfg.CipherSuites = info.CipherSuites
	}

	if info.AllowedCN != "" {
		cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
			for _, chains := range verifiedChains {
				if len(chains) != 0 {
					if info.AllowedCN == chains[0].Subject.CommonName {
						return nil
					}
				}
			}
			return errors.New("CommonName authentication failed")
		}
	}

	// this only reloads certs when there's a client request
	// TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
	cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
		cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
		if os.IsNotExist(err) {
			if info.Logger != nil {
				info.Logger.Warn(
					"failed to find peer cert files",
					zap.String("cert-file", info.CertFile),
					zap.String("key-file", info.KeyFile),
					zap.Error(err),
				)
			}
		} else if err != nil {
			if info.Logger != nil {
				info.Logger.Warn(
					"failed to create peer certificate",
					zap.String("cert-file", info.CertFile),
					zap.String("key-file", info.KeyFile),
					zap.Error(err),
				)
			}
		}
		return cert, err
	}
	cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) {
		cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
		if os.IsNotExist(err) {
			if info.Logger != nil {
				info.Logger.Warn(
					"failed to find client cert files",
					zap.String("cert-file", info.CertFile),
					zap.String("key-file", info.KeyFile),
					zap.Error(err),
				)
			}
		} else if err != nil {
			if info.Logger != nil {
				info.Logger.Warn(
					"failed to create client certificate",
					zap.String("cert-file", info.CertFile),
					zap.String("key-file", info.KeyFile),
					zap.Error(err),
				)
			}
		}
		return cert, err
	}
	return cfg, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/resolver/endpoint/endpoint.go#L172-L178
func epsToAddrs(eps ...string) (addrs []resolver.Address) {
	addrs = make([]resolver.Address, 0, len(eps))
	for _, ep := range eps {
		addrs = append(addrs, resolver.Address{Addr: ep})
	}
	return addrs
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/histogram.go#L138-L169
func (histogram histogramData) printHistogram() {
	fmt.Printf("Total count: %d\n", histogram.totalCount)
	fmt.Printf("Min value: %d\n", histogram.min)
	fmt.Printf("Max value: %d\n", histogram.max)
	fmt.Printf("Mean: %.2f\n", float64(histogram.sum)/float64(histogram.totalCount))
	fmt.Printf("%24s %9s\n", "Range", "Count")

	numBins := len(histogram.bins)
	for index, count := range histogram.countPerBin {
		if count == 0 {
			continue
		}

		// The last bin represents the bin that contains the range from
		// the last bin up to infinity so it's processed differently than the
		// other bins.
		if index == len(histogram.countPerBin)-1 {
			lowerBound := int(histogram.bins[numBins-1])
			fmt.Printf("[%10d, %10s) %9d\n", lowerBound, "infinity", count)
			continue
		}

		upperBound := int(histogram.bins[index])
		lowerBound := 0
		if index > 0 {
			lowerBound = int(histogram.bins[index-1])
		}

		fmt.Printf("[%10d, %10d) %9d\n", lowerBound, upperBound, count)
	}
	fmt.Println()
}