https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_snapshot.go#L66-L74
func (m *InmemSnapshotStore) List() ([]*SnapshotMeta, error) {
	m.RLock()
	defer m.RUnlock()

	if !m.hasSnapshot {
		return []*SnapshotMeta{}, nil
	}
	return []*SnapshotMeta{&m.latest.meta}, nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L470-L472
func (s *ConcatIterator) Valid() bool {
	return s.cur != nil && s.cur.Valid()
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L907-L912
func (r *Raft) LastContact() time.Time {
	r.lastContactLock.RLock()
	last := r.lastContact
	r.lastContactLock.RUnlock()
	return last
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/utils.go#L37-L49
func isOpFuncCalled(op string, opts []OpOption) bool {
	for _, opt := range opts {
		v := reflect.ValueOf(opt)
		if v.Kind() == reflect.Func {
			if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
				if strings.Contains(opFunc.Name(), op) {
					return true
				}
			}
		}
	}
	return false
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/fileutil/fileutil.go#L38-L44
func IsDirWriteable(dir string) error {
	f := filepath.Join(dir, ".touch")
	if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
		return err
	}
	return os.Remove(f)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/election.go#L110-L129
func (e *Election) Proclaim(ctx context.Context, val string) error {
	if e.leaderSession == nil {
		return ErrElectionNotLeader
	}
	client := e.session.Client()
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	txn := client.Txn(ctx).If(cmp)
	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
	tresp, terr := txn.Commit()
	if terr != nil {
		return terr
	}
	if !tresp.Succeeded {
		e.leaderKey = ""
		return ErrElectionNotLeader
	}
	e.hdr = tresp.Header
	return nil
}
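A minimal usage sketch for Proclaim, not from the source: it assumes a reachable etcd endpoint at localhost:2379 and the go.etcd.io/etcd import paths of this era; the election prefix and values are illustrative.

package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli) // keep-alive lease backing the election
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election/")
	// Campaign blocks until this candidate is elected leader.
	if err := e.Campaign(context.TODO(), "v1"); err != nil {
		log.Fatal(err)
	}
	// Proclaim replaces the leader's value without a new election; it fails
	// with ErrElectionNotLeader once leadership has been lost.
	if err := e.Proclaim(context.TODO(), "v2"); err != nil {
		log.Fatal(err)
	}
}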
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_transport.go#L175-L177
func (i *InmemTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
	return []byte(p)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/watermark.go#L140-L233
func (w *WaterMark) process(closer *Closer) {
	defer closer.Done()
	var indices uint64Heap
	// pending maps raft proposal index to the number of pending mutations for this proposal.
	pending := make(map[uint64]int)
	waiters := make(map[uint64][]chan struct{})

	heap.Init(&indices)
	var loop uint64

	processOne := func(index uint64, done bool) {
		// If not already done, then set. Otherwise, don't undo a done entry.
		prev, present := pending[index]
		if !present {
			heap.Push(&indices, index)
		}

		delta := 1
		if done {
			delta = -1
		}
		pending[index] = prev + delta

		loop++
		if len(indices) > 0 && loop%10000 == 0 {
			min := indices[0]
			w.elog.Printf("WaterMark %s: Done entry %4d. Size: %4d Watermark: %-4d Looking for: %-4d. Value: %d\n",
				w.Name, index, len(indices), w.DoneUntil(), min, pending[min])
		}

		// Update mark by going through all indices in order; and checking if they have
		// been done. Stop at the first index, which isn't done.
		doneUntil := w.DoneUntil()
		if doneUntil > index {
			AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index)
		}

		until := doneUntil
		loops := 0

		for len(indices) > 0 {
			min := indices[0]
			if done := pending[min]; done > 0 {
				break // len(indices) will be > 0.
			}
			// Even if done is called multiple times causing it to become
			// negative, we should still pop the index.
			heap.Pop(&indices)
			delete(pending, min)
			until = min
			loops++
		}

		for i := doneUntil + 1; i <= until; i++ {
			toNotify := waiters[i]
			for _, ch := range toNotify {
				close(ch)
			}
			delete(waiters, i) // Release the memory back.
		}

		if until != doneUntil {
			AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until))
			w.elog.Printf("%s: Done until %d. Loops: %d\n", w.Name, until, loops)
		}
	}

	for {
		select {
		case <-closer.HasBeenClosed():
			return
		case mark := <-w.markCh:
			if mark.waiter != nil {
				doneUntil := atomic.LoadUint64(&w.doneUntil)
				if doneUntil >= mark.index {
					close(mark.waiter)
				} else {
					ws, ok := waiters[mark.index]
					if !ok {
						waiters[mark.index] = []chan struct{}{mark.waiter}
					} else {
						waiters[mark.index] = append(ws, mark.waiter)
					}
				}
			} else {
				if mark.index > 0 {
					processOne(mark.index, mark.done)
				}
				for _, index := range mark.indices {
					processOne(index, mark.done)
				}
			}
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/election.go#L49-L57
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
	return &Election{
		keyPrefix:     pfx,
		session:       s,
		leaderKey:     leaderKey,
		leaderRev:     leaderRev,
		leaderSession: s,
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/levels.go#L201-L207
func closeAllTables(tables [][]*table.Table) {
	for _, tableSlice := range tables {
		for _, table := range tableSlice {
			_ = table.Close()
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/ioutil/readcloser.go#L36-L38
func NewExactReadCloser(rc io.ReadCloser, totalBytes int64) io.ReadCloser {
	return &exactReadCloser{rc: rc, totalBytes: totalBytes}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/lease/lessor.go#L848-L856
func (l *Lease) Keys() []string {
	l.mu.RLock()
	keys := make([]string, 0, len(l.itemSet))
	for k := range l.itemSet {
		keys = append(keys, k.Key)
	}
	l.mu.RUnlock()
	return keys
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/node.go#L115-L123
func (rd Ready) appliedCursor() uint64 {
	if n := len(rd.CommittedEntries); n > 0 {
		return rd.CommittedEntries[n-1].Index
	}
	if index := rd.Snapshot.Metadata.Index; index > 0 {
		return index
	}
	return 0
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/urls.go#L51-L60
func NewURLsValue(s string) *URLsValue {
	if s == "" {
		return &URLsValue{}
	}
	v := &URLsValue{}
	if err := v.Set(s); err != nil {
		plog.Panicf("new URLsValue should never fail: %v", err)
	}
	return v
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/del_command.go#L45-L54
func delCommandFunc(cmd *cobra.Command, args []string) {
	key, opts := getDelOp(args)
	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).Delete(ctx, key, opts...)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	display.Del(*resp)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L40-L42
func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) {
	return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout)
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_transport.go#L126-L136
func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
	rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout)
	if err != nil {
		return err
	}

	// Copy the result back
	out := rpcResp.Response.(*InstallSnapshotResponse)
	*resp = *out
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/schedule/schedule.go#L75-L90
func (f *fifo) Schedule(j Job) {
	f.mu.Lock()
	defer f.mu.Unlock()

	if f.cancel == nil {
		panic("schedule: schedule to stopped scheduler")
	}

	if len(f.pendings) == 0 {
		select {
		case f.resume <- struct{}{}:
		default:
		}
	}
	f.pendings = append(f.pendings, j)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/urls.go#L31-L38
func (us *URLsValue) Set(s string) error {
	ss, err := types.NewURLs(strings.Split(s, ","))
	if err != nil {
		return err
	}
	*us = URLsValue(ss)
	return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L275-L312
func (itr *Iterator) seekFrom(key []byte, whence int) {
	itr.err = nil
	switch whence {
	case origin:
		itr.reset()
	case current:
	}

	idx := sort.Search(len(itr.t.blockIndex), func(idx int) bool {
		ko := itr.t.blockIndex[idx]
		return y.CompareKeys(ko.key, key) > 0
	})
	if idx == 0 {
		// The smallest key in our table is already strictly > key. We can return that.
		// This is like a SeekToFirst.
		itr.seekHelper(0, key)
		return
	}

	// block[idx].smallest is > key.
	// Since idx>0, we know block[idx-1].smallest is <= key.
	// There are two cases.
	// 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first
	//    element of block[idx].
	// 2) Some element in block[idx-1] is >= key. We should go to that element.
	itr.seekHelper(idx-1, key)
	if itr.err == io.EOF {
		// Case 1. Need to visit block[idx].
		if idx == len(itr.t.blockIndex) {
			// If idx == len(itr.t.blockIndex), then input key is greater than ANY element of table.
			// There's nothing we can do. Valid() should return false as we seek to end of table.
			return
		}
		// Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst.
		itr.seekHelper(idx, key)
	}
	// Case 2: No need to do anything. We already did the seek in block[idx-1].
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/role_command.go#L31-L45
func NewRoleCommand() *cobra.Command {
	ac := &cobra.Command{
		Use:   "role <subcommand>",
		Short: "Role related commands",
	}

	ac.AddCommand(newRoleAddCommand())
	ac.AddCommand(newRoleDeleteCommand())
	ac.AddCommand(newRoleGetCommand())
	ac.AddCommand(newRoleListCommand())
	ac.AddCommand(newRoleGrantPermissionCommand())
	ac.AddCommand(newRoleRevokePermissionCommand())

	return ac
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/kvstore.go#L111-L150
func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
	s := &store{
		b:       b,
		ig:      ig,
		kvindex: newTreeIndex(lg),

		le: le,

		currentRev:     1,
		compactMainRev: -1,

		bytesBuf8: make([]byte, 8),
		fifoSched: schedule.NewFIFOScheduler(),

		stopc: make(chan struct{}),

		lg: lg,
	}
	s.ReadView = &readView{s}
	s.WriteView = &writeView{s}
	if s.le != nil {
		s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
	}

	tx := s.b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket(keyBucketName)
	tx.UnsafeCreateBucket(metaBucketName)
	tx.Unlock()
	s.b.ForceCommit()

	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.restore(); err != nil {
		// TODO: return the error instead of panic here?
		panic("failed to recover store from backend")
	}

	return s
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/backend/batch_tx.go#L195-L197
func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
	return unsafeForEach(t.tx, bucketName, visitor)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/snapshot/v3_snapshot.go#L239-L309
func (s *v3Manager) Restore(cfg RestoreConfig) error {
	pURLs, err := types.NewURLs(cfg.PeerURLs)
	if err != nil {
		return err
	}
	var ics types.URLsMap
	ics, err = types.NewURLsMap(cfg.InitialCluster)
	if err != nil {
		return err
	}

	srv := etcdserver.ServerConfig{
		Logger:              s.lg,
		Name:                cfg.Name,
		PeerURLs:            pURLs,
		InitialPeerURLsMap:  ics,
		InitialClusterToken: cfg.InitialClusterToken,
	}
	if err = srv.VerifyBootstrap(); err != nil {
		return err
	}

	s.cl, err = membership.NewClusterFromURLsMap(s.lg, cfg.InitialClusterToken, ics)
	if err != nil {
		return err
	}

	dataDir := cfg.OutputDataDir
	if dataDir == "" {
		dataDir = cfg.Name + ".etcd"
	}
	if fileutil.Exist(dataDir) {
		return fmt.Errorf("data-dir %q exists", dataDir)
	}

	walDir := cfg.OutputWALDir
	if walDir == "" {
		walDir = filepath.Join(dataDir, "member", "wal")
	} else if fileutil.Exist(walDir) {
		return fmt.Errorf("wal-dir %q exists", walDir)
	}

	s.name = cfg.Name
	s.dbPath = cfg.SnapshotPath
	s.walDir = walDir
	s.snapDir = filepath.Join(dataDir, "member", "snap")
	s.skipHashCheck = cfg.SkipHashCheck

	s.lg.Info(
		"restoring snapshot",
		zap.String("path", s.dbPath),
		zap.String("wal-dir", s.walDir),
		zap.String("data-dir", dataDir),
		zap.String("snap-dir", s.snapDir),
	)
	if err = s.saveDB(); err != nil {
		return err
	}
	if err = s.saveWALAndSnap(); err != nil {
		return err
	}
	s.lg.Info(
		"restored snapshot",
		zap.String("path", s.dbPath),
		zap.String("wal-dir", s.walDir),
		zap.String("data-dir", dataDir),
		zap.String("snap-dir", s.snapDir),
	)

	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/put_command.go#L34-L64
func NewPutCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "put [options] <key> <value> (<value> can also be given from stdin)",
		Short: "Puts the given key into the store",
		Long: `
Puts the given key into the store.

When <value> begins with '-', <value> is interpreted as a flag.
Insert '--' for workaround:

$ put <key> -- <value>
$ put -- <key> <value>

If <value> isn't given as a command line argument and '--ignore-value' is not specified,
this command tries to read the value from standard input.

If <lease> isn't given as a command line argument and '--ignore-lease' is not specified,
this command tries to read the value from standard input.

For example,
$ cat file | put <key>
will store the content of the file to <key>.
`,
		Run: putCommandFunc,
	}
	cmd.Flags().StringVar(&leaseStr, "lease", "0", "lease ID (in hexadecimal) to attach to the key")
	cmd.Flags().BoolVar(&putPrevKV, "prev-kv", false, "return the previous key-value pair before modification")
	cmd.Flags().BoolVar(&putIgnoreVal, "ignore-value", false, "updates the key using its current value")
	cmd.Flags().BoolVar(&putIgnoreLease, "ignore-lease", false, "updates the key using its current lease")
	return cmd
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/session.go#L115-L121
func WithTTL(ttl int) SessionOption {
	return func(so *sessionOptions) {
		if ttl > 0 {
			so.ttl = ttl
		}
	}
}
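A short hedged sketch of how this option is consumed: WithTTL is passed to concurrency.NewSession, and because the option ignores non-positive values, such a TTL silently falls back to the package default. The helper name is hypothetical, not part of etcd.

package main

import (
	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

// newSessionWithTTL is an illustrative helper: it creates a session whose
// keep-alive lease uses the given TTL in seconds; ttl <= 0 keeps the default.
func newSessionWithTTL(cli *clientv3.Client, ttl int) (*concurrency.Session, error) {
	return concurrency.NewSession(cli, concurrency.WithTTL(ttl))
}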
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L671-L703
func (s *store) DeleteExpiredKeys(cutoff time.Time) {
	s.worldLock.Lock()
	defer s.worldLock.Unlock()

	for {
		node := s.ttlKeyHeap.top()
		if node == nil || node.ExpireTime.After(cutoff) {
			break
		}

		s.CurrentIndex++
		e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex)
		e.EtcdIndex = s.CurrentIndex
		e.PrevNode = node.Repr(false, false, s.clock)
		if node.IsDir() {
			e.Node.Dir = true
		}

		callback := func(path string) { // notify function
			// notify the watchers with deleted set true
			s.WatcherHub.notifyWatchers(e, path, true)
		}

		s.ttlKeyHeap.pop()
		node.Remove(true, true, callback)

		reportExpiredKey()
		s.Stats.Inc(ExpireCount)

		s.WatcherHub.notify(e)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/member_command.go#L99-L168
func memberAddCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) < 1 {
		ExitWithError(ExitBadArgs, errors.New("member name not provided"))
	}
	if len(args) > 1 {
		ev := "too many arguments"
		for _, s := range args {
			if strings.HasPrefix(strings.ToLower(s), "http") {
				ev += fmt.Sprintf(`, did you mean --peer-urls=%s`, s)
			}
		}
		ExitWithError(ExitBadArgs, errors.New(ev))
	}
	newMemberName := args[0]

	if len(memberPeerURLs) == 0 {
		ExitWithError(ExitBadArgs, errors.New("member peer urls not provided"))
	}

	urls := strings.Split(memberPeerURLs, ",")
	ctx, cancel := commandCtx(cmd)
	cli := mustClientFromCmd(cmd)
	resp, err := cli.MemberAdd(ctx, urls)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	newID := resp.Member.ID

	display.MemberAdd(*resp)

	if _, ok := (display).(*simplePrinter); ok {
		ctx, cancel = commandCtx(cmd)
		listResp, err := cli.MemberList(ctx)
		// get latest member list; if there's failover new member might have outdated list
		for {
			if err != nil {
				ExitWithError(ExitError, err)
			}
			if listResp.Header.MemberId == resp.Header.MemberId {
				break
			}
			// quorum get to sync cluster list
			gresp, gerr := cli.Get(ctx, "_")
			if gerr != nil {
				// report the Get error itself, not the stale err from MemberList
				ExitWithError(ExitError, gerr)
			}
			resp.Header.MemberId = gresp.Header.MemberId
			listResp, err = cli.MemberList(ctx)
		}
		cancel()

		conf := []string{}
		for _, memb := range listResp.Members {
			for _, u := range memb.PeerURLs {
				n := memb.Name
				if memb.ID == newID {
					n = newMemberName
				}
				conf = append(conf, fmt.Sprintf("%s=%s", n, u))
			}
		}

		fmt.Print("\n")
		fmt.Printf("ETCD_NAME=%q\n", newMemberName)
		fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ","))
		fmt.Printf("ETCD_INITIAL_ADVERTISE_PEER_URLS=%q\n", memberPeerURLs)
		fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/levels.go#L798-L838
func (s *levelsController) doCompact(p compactionPriority) error {
	l := p.level
	y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.

	cd := compactDef{
		elog:       trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
		thisLevel:  s.levels[l],
		nextLevel:  s.levels[l+1],
		dropPrefix: p.dropPrefix,
	}
	cd.elog.SetMaxEvents(100)
	defer cd.elog.Finish()

	s.kv.opt.Infof("Got compaction priority: %+v", p)

	// While picking tables to be compacted, both levels' tables are expected to
	// remain unchanged.
	if l == 0 {
		if !s.fillTablesL0(&cd) {
			return errFillTables
		}
	} else {
		if !s.fillTables(&cd) {
			return errFillTables
		}
	}
	defer s.cstatus.delete(cd) // Remove the ranges from compaction status.

	s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
	s.cstatus.toLog(cd.elog)
	if err := s.runCompactDef(l, cd); err != nil {
		// This compaction couldn't be done successfully.
		s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
		return err
	}

	s.cstatus.toLog(cd.elog)
	s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/agent/handler.go#L694-L720
func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT() (*rpcpb.Response, error) {
	err := srv.stopEtcd(syscall.SIGQUIT)
	if err != nil {
		return nil, err
	}

	if srv.etcdServer != nil {
		srv.etcdServer.GetLogger().Sync()
	} else {
		srv.etcdLogFile.Sync()
		srv.etcdLogFile.Close()
	}

	err = os.RemoveAll(srv.Member.BaseDir)
	if err != nil {
		return nil, err
	}
	srv.lg.Info("removed base directory", zap.String("dir", srv.Member.BaseDir))

	// stop agent server
	srv.Stop()

	return &rpcpb.Response{
		Success: true,
		Status:  "destroyed etcd and agent",
	}, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/logutil/zap_raft.go#L40-L44
func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
	// "AddCallerSkip" to annotate caller outside of "logutil"
	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/batch.go#L98-L101
func (wb *WriteBatch) Set(k, v []byte, meta byte) error {
	e := &Entry{Key: k, Value: v, UserMeta: meta}
	return wb.SetEntry(e)
}
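A hedged usage sketch for WriteBatch, assuming an already-open *badger.DB and the pre-v2 import path; the helper name and keys are illustrative, not part of badger. Set queues entries, and Flush commits everything that was batched.

package main

import (
	"fmt"

	"github.com/dgraph-io/badger"
)

// writeMany batches n Set calls on one WriteBatch and commits them together.
func writeMany(db *badger.DB, n int) error {
	wb := db.NewWriteBatch()
	defer wb.Cancel() // a no-op if Flush already succeeded

	for i := 0; i < n; i++ {
		key := []byte(fmt.Sprintf("key%05d", i))
		if err := wb.Set(key, []byte("value"), 0); err != nil {
			return err
		}
	}
	return wb.Flush() // waits until all batched writes are committed
}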
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/peer.go#L358-L370
func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
	var ok bool
	// Considering MsgSnap may have a big size, e.g., 1G, and will block
	// stream for a long time, only use one of the N pipelines to send MsgSnap.
	if isMsgSnap(m) {
		return p.pipeline.msgc, pipelineMsg
	} else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
		return writec, streamAppV2
	} else if writec, ok = p.writer.writec(); ok {
		return writec, streamMsg
	}
	return p.pipeline.msgc, pipelineMsg
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/arena.go#L63-L78
func (s *Arena) putNode(height int) uint32 {
	// Compute the amount of the tower that will never be used, since the height
	// is less than maxHeight.
	unusedSize := (maxHeight - height) * offsetSize

	// Pad the allocation with enough bytes to ensure pointer alignment.
	l := uint32(MaxNodeSize - unusedSize + nodeAlign)
	n := atomic.AddUint32(&s.n, l)
	y.AssertTruef(int(n) <= len(s.buf),
		"Arena too small, toWrite:%d newTotal:%d limit:%d",
		l, n, len(s.buf))

	// Return the aligned offset.
	m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign)
	return m
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/membership/cluster.go#L155-L164
func (c *RaftCluster) PeerURLs() []string {
	c.Lock()
	defer c.Unlock()
	urls := make([]string, 0)
	for _, p := range c.members {
		urls = append(urls, p.PeerURLs...)
	}
	sort.Strings(urls)
	return urls
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/compaction_command.go#L39-L62
func compactionCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("compaction command needs 1 argument"))
	}

	rev, err := strconv.ParseInt(args[0], 10, 64)
	if err != nil {
		ExitWithError(ExitError, err)
	}

	var opts []clientv3.CompactOption
	if compactPhysical {
		opts = append(opts, clientv3.WithCompactPhysical())
	}

	c := mustClientFromCmd(cmd)
	ctx, cancel := commandCtx(cmd)
	_, cerr := c.Compact(ctx, rev, opts...)
	cancel()
	if cerr != nil {
		ExitWithError(ExitError, cerr)
	}
	fmt.Println("compacted revision", rev)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/client.go#L33-L43
func deleteRevKey(kv v3.KV, key string, rev int64) (bool, error) {
	cmp := v3.Compare(v3.ModRevision(key), "=", rev)
	req := v3.OpDelete(key)
	txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit()
	if err != nil {
		return false, err
	} else if !txnresp.Succeeded {
		return false, nil
	}
	return true, nil
}
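A hedged sketch of the compare-and-delete pattern deleteRevKey enables: read the key, then delete it only if its ModRevision is still the revision that was read. The helper name is made up; it assumes the same package and the v3 alias import used by the recipe above.

package recipes

import (
	"context"

	v3 "go.etcd.io/etcd/clientv3"
)

// deleteIfUnchanged is illustrative, not part of etcd: it returns false if the
// key is missing or was modified between the Get and the transactional delete.
func deleteIfUnchanged(kv v3.KV, key string) (bool, error) {
	resp, err := kv.Get(context.TODO(), key)
	if err != nil || len(resp.Kvs) == 0 {
		return false, err
	}
	return deleteRevKey(kv, key, resp.Kvs[0].ModRevision)
}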
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/balancer.go#L126-L171
func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
	if err != nil {
		bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
		return
	}
	bb.lg.Info("resolved", zap.String("balancer-id", bb.id), zap.Strings("addresses", addrsToStrings(addrs)))

	bb.mu.Lock()
	defer bb.mu.Unlock()

	resolved := make(map[resolver.Address]struct{})
	for _, addr := range addrs {
		resolved[addr] = struct{}{}
		if _, ok := bb.addrToSc[addr]; !ok {
			sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
			if err != nil {
				bb.lg.Warn("NewSubConn failed", zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
				continue
			}
			bb.addrToSc[addr] = sc
			bb.scToAddr[sc] = addr
			bb.scToSt[sc] = connectivity.Idle
			sc.Connect()
		}
	}

	for addr, sc := range bb.addrToSc {
		if _, ok := resolved[addr]; !ok {
			// was removed by resolver or failed to create subconn
			bb.currentConn.RemoveSubConn(sc)
			delete(bb.addrToSc, addr)

			bb.lg.Info(
				"removed subconn",
				zap.String("balancer-id", bb.id),
				zap.String("address", addr.Addr),
				zap.String("subconn", scToString(sc)),
			)

			// Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
			// The entry will be deleted in HandleSubConnStateChange.
			// (DO NOT) delete(bb.scToAddr, sc)
			// (DO NOT) delete(bb.scToSt, sc)
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/read_only.go#L52-L59
func (ro *readOnly) addRequest(index uint64, m pb.Message) {
	ctx := string(m.Entries[0].Data)
	if _, ok := ro.pendingReadIndex[ctx]; ok {
		return
	}
	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/user_command.go#L31-L46
func NewUserCommand() *cobra.Command {
	ac := &cobra.Command{
		Use:   "user <subcommand>",
		Short: "User related commands",
	}

	ac.AddCommand(newUserAddCommand())
	ac.AddCommand(newUserDeleteCommand())
	ac.AddCommand(newUserGetCommand())
	ac.AddCommand(newUserListCommand())
	ac.AddCommand(newUserChangePasswordCommand())
	ac.AddCommand(newUserGrantRoleCommand())
	ac.AddCommand(newUserRevokeRoleCommand())

	return ac
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L693-L711
func (m *member) listenGRPC() error {
	// prefix with localhost so cert has right domain
	m.grpcAddr = "localhost:" + m.Name
	if m.useIP { // for IP-only TLS certs
		m.grpcAddr = "127.0.0.1:" + m.Name
	}
	l, err := transport.NewUnixListener(m.grpcAddr)
	if err != nil {
		return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
	}
	m.grpcBridge, err = newBridge(m.grpcAddr)
	if err != nil {
		l.Close()
		return err
	}
	m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr
	m.grpcListener = l
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/wal.go#L284-L293
func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
	w, err := openAtIndex(lg, dirpath, snap, true)
	if err != nil {
		return nil, err
	}
	if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
		return nil, err
	}
	return w, nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/dir_windows.go#L70-L100
func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) {
	if readOnly {
		return nil, ErrWindowsNotSupported
	}

	// Convert to absolute path so that Release still works even if we do an unbalanced
	// chdir in the meantime.
	absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
	if err != nil {
		return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file")
	}

	// This call creates a file handler in memory that only one process can use at a time. When
	// that process ends, the file is deleted by the system.
	// FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
	// FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
	// the file when all processes holding the handler are closed.
	// XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
	h, err := syscall.CreateFile(
		syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
		syscall.OPEN_ALWAYS,
		uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE),
		0)
	if err != nil {
		return nil, errors.Wrapf(err,
			"Cannot create lock file %q. Another process is using this Badger database",
			absLockFilePath)
	}

	return &directoryLockGuard{h: h, path: absLockFilePath}, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2discovery/discovery.go#L73-L79
func GetCluster(lg *zap.Logger, durl, dproxyurl string) (string, error) {
	d, err := newDiscovery(lg, durl, dproxyurl, 0)
	if err != nil {
		return "", err
	}
	return d.getCluster()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/user_command.go#L123-L158
func userAddCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("user add command requires user name as its argument"))
	}

	var password string
	var user string

	if passwordFromFlag != "" {
		user = args[0]
		password = passwordFromFlag
	} else {
		splitted := strings.SplitN(args[0], ":", 2)
		if len(splitted) < 2 {
			user = args[0]
			if !passwordInteractive {
				fmt.Scanf("%s", &password)
			} else {
				password = readPasswordInteractive(args[0])
			}
		} else {
			user = splitted[0]
			password = splitted[1]
			if len(user) == 0 {
				ExitWithError(ExitBadArgs, fmt.Errorf("empty user name is not allowed"))
			}
		}
	}

	resp, err := mustClientFromCmd(cmd).Auth.UserAdd(context.TODO(), user, password)
	if err != nil {
		ExitWithError(ExitError, err)
	}

	display.UserAdd(user, *resp)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L1394-L1397
func (r *raft) promotable() bool {
	_, ok := r.prs[r.id]
	return ok
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/barrier.go#L51-L66
func (b *Barrier) Wait() error {
	resp, err := b.client.Get(b.ctx, b.key, v3.WithFirstKey()...)
	if err != nil {
		return err
	}
	if len(resp.Kvs) == 0 {
		// key already removed
		return nil
	}
	_, err = WaitEvents(
		b.client,
		b.key,
		resp.Header.Revision,
		[]mvccpb.Event_EventType{mvccpb.PUT, mvccpb.DELETE})
	return err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/storage.go#L100-L105
func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
	ms.Lock()
	defer ms.Unlock()
	ms.hardState = st
	return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L744-L757
func (n *netPipeline) Close() error {
	n.shutdownLock.Lock()
	defer n.shutdownLock.Unlock()
	if n.shutdown {
		return nil
	}

	// Release the connection
	n.conn.Release()

	n.shutdown = true
	close(n.shutdownCh)
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/snap/db.go#L83-L100
func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
	if _, err := fileutil.ReadDir(s.dir); err != nil {
		return "", err
	}
	fn := s.dbFilePath(id)
	if fileutil.Exist(fn) {
		return fn, nil
	}
	if s.lg != nil {
		s.lg.Warn(
			"failed to find [SNAPSHOT-INDEX].snap.db",
			zap.Uint64("snapshot-index", id),
			zap.String("snapshot-file-path", fn),
			zap.Error(ErrNoDBSnapshot),
		)
	}
	return "", ErrNoDBSnapshot
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3rpc/watch.go#L48-L61
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
	return &watchServer{
		lg: s.Cfg.Logger,

		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),

		maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),

		sg:        s,
		watchable: s.Watchable(),
		ag:        s,
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/membership/cluster.go#L255-L326
func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
	members, removed := membersFromStore(c.lg, c.v2store)
	id := types.ID(cc.NodeID)
	if removed[id] {
		return ErrIDRemoved
	}
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		if members[id] != nil {
			return ErrIDExists
		}
		urls := make(map[string]bool)
		for _, m := range members {
			for _, u := range m.PeerURLs {
				urls[u] = true
			}
		}
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			if c.lg != nil {
				c.lg.Panic("failed to unmarshal member", zap.Error(err))
			} else {
				plog.Panicf("unmarshal member should never fail: %v", err)
			}
		}
		for _, u := range m.PeerURLs {
			if urls[u] {
				return ErrPeerURLexists
			}
		}
	case raftpb.ConfChangeRemoveNode:
		if members[id] == nil {
			return ErrIDNotFound
		}
	case raftpb.ConfChangeUpdateNode:
		if members[id] == nil {
			return ErrIDNotFound
		}
		urls := make(map[string]bool)
		for _, m := range members {
			if m.ID == id {
				continue
			}
			for _, u := range m.PeerURLs {
				urls[u] = true
			}
		}
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			if c.lg != nil {
				c.lg.Panic("failed to unmarshal member", zap.Error(err))
			} else {
				plog.Panicf("unmarshal member should never fail: %v", err)
			}
		}
		for _, u := range m.PeerURLs {
			if urls[u] {
				return ErrPeerURLexists
			}
		}
	default:
		if c.lg != nil {
			c.lg.Panic("unknown ConfChange type", zap.String("type", cc.Type.String()))
		} else {
			plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
		}
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/tls.go#L25-L49
func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
	t, err := NewTransport(tlsInfo, 5*time.Second)
	if err != nil {
		return nil, err
	}
	var errs []string
	var endpoints []string
	for _, ep := range eps {
		if !strings.HasPrefix(ep, "https://") {
			errs = append(errs, fmt.Sprintf("%q is insecure", ep))
			continue
		}
		conn, cerr := t.Dial("tcp", ep[len("https://"):])
		if cerr != nil {
			errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
			continue
		}
		conn.Close()
		endpoints = append(endpoints, ep)
	}
	if len(errs) != 0 {
		err = fmt.Errorf("%s", strings.Join(errs, ","))
	}
	return endpoints, err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L2408-L2428
func (s *EtcdServer) goAttach(f func()) {
	s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
	defer s.wgMu.RUnlock()
	select {
	case <-s.stopping:
		if lg := s.getLogger(); lg != nil {
			lg.Warn("server has stopped; skipping goAttach")
		} else {
			plog.Warning("server has stopped (skipping goAttach)")
		}
		return
	default:
	}

	// now safe to add since waitgroup wait has not started yet
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		f()
	}()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2stats/server.go#L114-L128
func (ss *ServerStats) SendAppendReq(reqSize int) {
	ss.Lock()
	defer ss.Unlock()

	ss.becomeLeader()

	ss.sendRateQueue.Insert(
		&RequestStats{
			SendingTime: time.Now(),
			Size:        reqSize,
		},
	)

	ss.SendAppendRequestCnt++
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L1006-L1027
func (m *member) Close() {
	if m.grpcBridge != nil {
		m.grpcBridge.Close()
		m.grpcBridge = nil
	}
	if m.serverClient != nil {
		m.serverClient.Close()
		m.serverClient = nil
	}
	if m.grpcServer != nil {
		m.grpcServer.Stop()
		m.grpcServer.GracefulStop()
		m.grpcServer = nil
		m.grpcServerPeer.Stop()
		m.grpcServerPeer.GracefulStop()
		m.grpcServerPeer = nil
	}
	m.s.HardStop()
	for _, f := range m.serverClosers {
		f()
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/levels.go#L58-L78
func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
	// 1. Check all files in manifest exist.
	for id := range mf.Tables {
		if _, ok := idMap[id]; !ok {
			return fmt.Errorf("file does not exist for table %d", id)
		}
	}

	// 2. Delete files that shouldn't exist.
	for id := range idMap {
		if _, ok := mf.Tables[id]; !ok {
			kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id)
			filename := table.NewFilename(id, kv.opt.Dir)
			if err := os.Remove(filename); err != nil {
				return y.Wrapf(err, "While removing table %d", id)
			}
		}
	}

	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/osutil/interrupt_unix.go#L48-L80
func HandleInterrupts(lg *zap.Logger) {
	notifier := make(chan os.Signal, 1)
	signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-notifier
		interruptRegisterMu.Lock()
		ihs := make([]InterruptHandler, len(interruptHandlers))
		copy(ihs, interruptHandlers)
		interruptRegisterMu.Unlock()

		interruptExitMu.Lock()

		if lg != nil {
			lg.Info("received signal; shutting down", zap.String("signal", sig.String()))
		} else {
			plog.Noticef("received %v signal, shutting down...", sig)
		}

		for _, h := range ihs {
			h()
		}
		signal.Stop(notifier)
		pid := syscall.Getpid()
		// exit directly if it is the "init" process, since the kernel will not help to kill pid 1.
		if pid == 1 {
			os.Exit(0)
		}
		setDflSignal(sig.(syscall.Signal))
		syscall.Kill(pid, sig.(syscall.Signal))
	}()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/agent/handler.go#L321-L392
func (srv *Server) saveTLSAssets() error {
	if srv.Member.PeerCertPath != "" {
		if srv.Member.PeerCertData == "" {
			return fmt.Errorf("got empty data for %q", srv.Member.PeerCertPath)
		}
		if err := ioutil.WriteFile(srv.Member.PeerCertPath, []byte(srv.Member.PeerCertData), 0644); err != nil {
			return err
		}
	}
	if srv.Member.PeerKeyPath != "" {
		if srv.Member.PeerKeyData == "" {
			return fmt.Errorf("got empty data for %q", srv.Member.PeerKeyPath)
		}
		if err := ioutil.WriteFile(srv.Member.PeerKeyPath, []byte(srv.Member.PeerKeyData), 0644); err != nil {
			return err
		}
	}
	if srv.Member.PeerTrustedCAPath != "" {
		if srv.Member.PeerTrustedCAData == "" {
			return fmt.Errorf("got empty data for %q", srv.Member.PeerTrustedCAPath)
		}
		if err := ioutil.WriteFile(srv.Member.PeerTrustedCAPath, []byte(srv.Member.PeerTrustedCAData), 0644); err != nil {
			return err
		}
	}
	if srv.Member.PeerCertPath != "" &&
		srv.Member.PeerKeyPath != "" &&
		srv.Member.PeerTrustedCAPath != "" {
		srv.lg.Info(
			"wrote",
			zap.String("peer-cert", srv.Member.PeerCertPath),
			zap.String("peer-key", srv.Member.PeerKeyPath),
			zap.String("peer-trusted-ca", srv.Member.PeerTrustedCAPath),
		)
	}

	if srv.Member.ClientCertPath != "" {
		if srv.Member.ClientCertData == "" {
			return fmt.Errorf("got empty data for %q", srv.Member.ClientCertPath)
		}
		if err := ioutil.WriteFile(srv.Member.ClientCertPath, []byte(srv.Member.ClientCertData), 0644); err != nil {
			return err
		}
	}
	if srv.Member.ClientKeyPath != "" {
		if srv.Member.ClientKeyData == "" {
			return fmt.Errorf("got empty data for %q", srv.Member.ClientKeyPath)
		}
		if err := ioutil.WriteFile(srv.Member.ClientKeyPath, []byte(srv.Member.ClientKeyData), 0644); err != nil {
			return err
		}
	}
	if srv.Member.ClientTrustedCAPath != "" {
		if srv.Member.ClientTrustedCAData == "" {
			return fmt.Errorf("got empty data for %q", srv.Member.ClientTrustedCAPath)
		}
		if err := ioutil.WriteFile(srv.Member.ClientTrustedCAPath, []byte(srv.Member.ClientTrustedCAData), 0644); err != nil {
			return err
		}
	}
	if srv.Member.ClientCertPath != "" &&
		srv.Member.ClientKeyPath != "" &&
		srv.Member.ClientTrustedCAPath != "" {
		srv.lg.Info(
			"wrote",
			zap.String("client-cert", srv.Member.ClientCertPath),
			zap.String("client-key", srv.Member.ClientKeyPath),
			zap.String("client-trusted-ca", srv.Member.ClientTrustedCAPath),
		)
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/wal.go#L508-L578
func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) error {
	var metadata []byte
	var err error
	var match bool

	rec := &walpb.Record{}

	names, nameIndex, err := selectWALFiles(lg, walDir, snap)
	if err != nil {
		return err
	}

	// open wal files in read mode, so that there is no conflict
	// when the same WAL is opened elsewhere in write mode
	rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false)
	if err != nil {
		return err
	}

	// create a new decoder from the readers on the WAL files
	decoder := newDecoder(rs...)

	for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
		switch rec.Type {
		case metadataType:
			if metadata != nil && !bytes.Equal(metadata, rec.Data) {
				return ErrMetadataConflict
			}
			metadata = rec.Data
		case crcType:
			crc := decoder.crc.Sum32()
			// Current crc of decoder must match the crc of the record.
			// We need not match 0 crc, since the decoder is a new one at this point.
			if crc != 0 && rec.Validate(crc) != nil {
				return ErrCRCMismatch
			}
			decoder.updateCRC(rec.Crc)
		case snapshotType:
			var loadedSnap walpb.Snapshot
			pbutil.MustUnmarshal(&loadedSnap, rec.Data)
			if loadedSnap.Index == snap.Index {
				if loadedSnap.Term != snap.Term {
					return ErrSnapshotMismatch
				}
				match = true
			}
		// We ignore all entry and state type records as these
		// are not necessary for validating the WAL contents
		case entryType:
		case stateType:
		default:
			return fmt.Errorf("unexpected block type %d", rec.Type)
		}
	}

	if closer != nil {
		closer()
	}

	// We do not have to read out all the WAL entries
	// as the decoder is opened in read mode.
	if err != io.EOF && err != io.ErrUnexpectedEOF {
		return err
	}

	if !match {
		return ErrSnapshotNotFound
	}

	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/node.go#L480-L517
func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
	if m.Type != pb.MsgProp {
		select {
		case n.recvc <- m:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-n.done:
			return ErrStopped
		}
	}
	ch := n.propc
	pm := msgWithResult{m: m}
	if wait {
		pm.result = make(chan error, 1)
	}
	select {
	case ch <- pm:
		if !wait {
			return nil
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	select {
	case err := <-pm.result:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/watcher_hub.go#L190-L200
func isHidden(watchPath, keyPath string) bool {
	// When deleting a directory, watchPath might be deeper than the actual keyPath
	// For example, when deleting /foo we also need to notify watchers on /foo/bar.
	if len(watchPath) > len(keyPath) {
		return false
	}
	// if watch path is just a "/", after path will start without "/"
	// add a "/" to deal with the special case when watchPath is "/"
	afterPath := path.Clean("/" + keyPath[len(watchPath):])
	return strings.Contains(afterPath, "/_")
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L261-L271
func (l *raftLog) allEntries() []pb.Entry {
	ents, err := l.entries(l.firstIndex(), noLimit)
	if err == nil {
		return ents
	}
	if err == ErrCompacted { // try again if there was a racing compaction
		return l.allEntries()
	}
	// TODO (xiangli): handle error?
	panic(err)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L271-L281
func (t *Throttle) Done(err error) {
	if err != nil {
		t.errCh <- err
	}
	select {
	case <-t.ch:
	default:
		panic("Throttle Do Done mismatch")
	}
	t.wg.Done()
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L375-L390
func (s *Skiplist) Get(key []byte) y.ValueStruct {
	n, _ := s.findNear(key, false, true) // findGreaterOrEqual.
	if n == nil {
		return y.ValueStruct{}
	}

	nextKey := s.arena.getKey(n.keyOffset, n.keySize)
	if !y.SameKey(key, nextKey) {
		return y.ValueStruct{}
	}

	valOffset, valSize := n.getValueOffset()
	vs := s.arena.getVal(valOffset, valSize)
	vs.Version = y.ParseTs(nextKey)
	return vs
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/member_command.go#L191-L215
func memberUpdateCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("member ID is not provided"))
	}

	id, err := strconv.ParseUint(args[0], 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad member ID arg (%v), expecting ID in Hex", err))
	}

	if len(memberPeerURLs) == 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("member peer urls not provided"))
	}

	urls := strings.Split(memberPeerURLs, ",")

	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).MemberUpdate(ctx, id, urls)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	display.MemberUpdate(id, *resp)
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L670-L703
func (r *Raft) checkLeaderLease() time.Duration {
	// Track contacted nodes, we can always contact ourself
	contacted := 1

	// Check each follower
	var maxDiff time.Duration
	now := time.Now()
	for peer, f := range r.leaderState.replState {
		diff := now.Sub(f.LastContact())
		if diff <= r.conf.LeaderLeaseTimeout {
			contacted++
			if diff > maxDiff {
				maxDiff = diff
			}
		} else {
			// Log at least once at high value, then debug. Otherwise it gets very verbose.
			if diff <= 3*r.conf.LeaderLeaseTimeout {
				r.logger.Warn(fmt.Sprintf("Failed to contact %v in %v", peer, diff))
			} else {
				r.logger.Debug(fmt.Sprintf("Failed to contact %v in %v", peer, diff))
			}
		}
		metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond))
	}

	// Verify we can contact a quorum
	quorum := r.quorumSize()
	if contacted < quorum {
		r.logger.Warn("Failed to contact quorum of nodes, stepping down")
		r.setState(Follower)
		metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1)
	}
	return maxDiff
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3compactor/revision.go#L49-L59
func newRevision(lg *zap.Logger, clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision {
	rc := &Revision{
		lg:        lg,
		clock:     clock,
		retention: retention,
		rg:        rg,
		c:         c,
	}
	rc.ctx, rc.cancel = context.WithCancel(context.Background())
	return rc
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/config.go#L366-L421
func NewConfig() *Config {
	lpurl, _ := url.Parse(DefaultListenPeerURLs)
	apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
	lcurl, _ := url.Parse(DefaultListenClientURLs)
	acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
	cfg := &Config{
		MaxSnapFiles: DefaultMaxSnapshots,
		MaxWalFiles:  DefaultMaxWALs,

		Name: DefaultName,

		SnapshotCount:          etcdserver.DefaultSnapshotCount,
		SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,

		MaxTxnOps:       DefaultMaxTxnOps,
		MaxRequestBytes: DefaultMaxRequestBytes,

		GRPCKeepAliveMinTime:  DefaultGRPCKeepAliveMinTime,
		GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
		GRPCKeepAliveTimeout:  DefaultGRPCKeepAliveTimeout,

		TickMs:                     100,
		ElectionMs:                 1000,
		InitialElectionTickAdvance: true,

		LPUrls: []url.URL{*lpurl},
		LCUrls: []url.URL{*lcurl},
		APUrls: []url.URL{*apurl},
		ACUrls: []url.URL{*acurl},

		ClusterState:        ClusterStateFlagNew,
		InitialClusterToken: "etcd-cluster",

		StrictReconfigCheck: DefaultStrictReconfigCheck,
		Metrics:             "basic",
		EnableV2:            DefaultEnableV2,

		CORS:          map[string]struct{}{"*": {}},
		HostWhitelist: map[string]struct{}{"*": {}},

		AuthToken:  "simple",
		BcryptCost: uint(bcrypt.DefaultCost),

		PreVote: false, // TODO: enable by default in v3.5

		loggerMu:            new(sync.RWMutex),
		logger:              nil,
		Logger:              "capnslog",
		DeprecatedLogOutput: []string{DefaultLogOutput},
		LogOutputs:          []string{DefaultLogOutput},
		Debug:               false,
		LogPkgLevels:        "",
	}
	cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
	return cfg
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/watcher.go#L44-L75
func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
	// watcher is interested the path in three cases and under one condition
	// the condition is that the event happens after the watcher's sinceIndex

	// 1. the path at which the event happens is the path the watcher is watching at.
	// For example if the watcher is watching at "/foo" and the event happens at "/foo",
	// the watcher must be interested in that event.

	// 2. the watcher is a recursive watcher, it interests in the event happens after
	// its watching path. For example if watcher A watches at "/foo" and it is a recursive
	// one, it will interest in the event happens at "/foo/bar".

	// 3. when we delete a directory, we need to force notify all the watchers who watches
	// at the file we need to delete.
	// For example a watcher is watching at "/foo/bar". And we deletes "/foo". The watcher
	// should get notified even if "/foo" is not the path it is watching.
	if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
		// We cannot block here if the eventChan capacity is full, otherwise
		// etcd will hang. eventChan capacity is full when the rate of
		// notifications are higher than our send rate.
		// If this happens, we close the channel.
		select {
		case w.eventChan <- e:
		default:
			// We have missed a notification. Remove the watcher.
			// Removing the watcher also closes the eventChan.
			w.remove()
		}
		return true
	}
	return false
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L56-L80
func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog {
	if storage == nil {
		log.Panic("storage must not be nil")
	}
	log := &raftLog{
		storage:         storage,
		logger:          logger,
		maxNextEntsSize: maxNextEntsSize,
	}
	firstIndex, err := storage.FirstIndex()
	if err != nil {
		panic(err) // TODO(bdarnell)
	}
	lastIndex, err := storage.LastIndex()
	if err != nil {
		panic(err) // TODO(bdarnell)
	}
	log.unstable.offset = lastIndex + 1
	log.unstable.logger = logger
	// Initialize our committed and applied pointers to the time of the last compaction.
	log.committed = firstIndex - 1
	log.applied = firstIndex - 1

	return log
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L519-L572
func (r *Raft) restoreSnapshot() error {
	snapshots, err := r.snapshots.List()
	if err != nil {
		r.logger.Error(fmt.Sprintf("Failed to list snapshots: %v", err))
		return err
	}

	// Try to load in order of newest to oldest
	for _, snapshot := range snapshots {
		_, source, err := r.snapshots.Open(snapshot.ID)
		if err != nil {
			r.logger.Error(fmt.Sprintf("Failed to open snapshot %v: %v", snapshot.ID, err))
			continue
		}
		defer source.Close()

		if err := r.fsm.Restore(source); err != nil {
			r.logger.Error(fmt.Sprintf("Failed to restore snapshot %v: %v", snapshot.ID, err))
			continue
		}

		// Log success
		r.logger.Info(fmt.Sprintf("Restored from snapshot %v", snapshot.ID))

		// Update the lastApplied so we don't replay old logs
		r.setLastApplied(snapshot.Index)

		// Update the last stable snapshot info
		r.setLastSnapshot(snapshot.Index, snapshot.Term)

		// Update the configuration
		if snapshot.Version > 0 {
			r.configurations.committed = snapshot.Configuration
			r.configurations.committedIndex = snapshot.ConfigurationIndex
			r.configurations.latest = snapshot.Configuration
			r.configurations.latestIndex = snapshot.ConfigurationIndex
		} else {
			configuration := decodePeers(snapshot.Peers, r.trans)
			r.configurations.committed = configuration
			r.configurations.committedIndex = snapshot.Index
			r.configurations.latest = configuration
			r.configurations.latestIndex = snapshot.Index
		}

		// Success!
		return nil
	}

	// If we had snapshots and failed to load them, its an error
	if len(snapshots) > 0 {
		return fmt.Errorf("failed to load any existing snapshots")
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L1160-L1165
func (m *member) RecoverPartition(t testing.TB, others ...*member) {
	for _, other := range others {
		m.s.MendPeer(other.s.ID())
		other.s.MendPeer(m.s.ID())
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/histogram.go#L80-L86
func createHistogramBins(minExponent, maxExponent uint32) []int64 {
	var bins []int64
	for i := minExponent; i <= maxExponent; i++ {
		bins = append(bins, int64(1)<<i)
	}
	return bins
}
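For example, createHistogramBins(0, 3) returns [1 2 4 8]: one power-of-two boundary per exponent in the inclusive range.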
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/storage.go#L142-L146
func (ms *MemoryStorage) LastIndex() (uint64, error) {
	ms.Lock()
	defer ms.Unlock()
	return ms.lastIndex(), nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry.go#L164-L168
func RetryClusterClient(c *Client) pb.ClusterClient {
	return &retryClusterClient{
		cc: pb.NewClusterClient(c.conn),
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/capability.go#L60-L82
func UpdateCapability(lg *zap.Logger, v *semver.Version) {
	if v == nil {
		// if recovered but version was never set by cluster
		return
	}
	enableMapMu.Lock()
	if curVersion != nil && !curVersion.LessThan(*v) {
		enableMapMu.Unlock()
		return
	}
	curVersion = v
	enabledMap = capabilityMaps[curVersion.String()]
	enableMapMu.Unlock()

	if lg != nil {
		lg.Info(
			"enabled capabilities for version",
			zap.String("cluster-version", version.Cluster(v.String())),
		)
	} else {
		plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L259-L270
func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr Progress)) {
	for id, pr := range rn.raft.prs {
		pr := *pr
		pr.ins = nil
		visitor(id, ProgressTypePeer, pr)
	}
	for id, pr := range rn.raft.learnerPrs {
		pr := *pr
		pr.ins = nil
		visitor(id, ProgressTypeLearner, pr)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/snap/snapshotter.go#L219-L235
func (s *Snapshotter) snapNames() ([]string, error) {
	dir, err := os.Open(s.dir)
	if err != nil {
		return nil, err
	}
	defer dir.Close()
	names, err := dir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	snaps := checkSuffix(s.lg, names)
	if len(snaps) == 0 {
		return nil, ErrNoSnapshot
	}
	sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
	return snaps, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L1210-L1253
func stepCandidate(r *raft, m pb.Message) error {
	// Only handle vote responses corresponding to our candidacy (while in
	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
	// our pre-candidate state).
	var myVoteRespType pb.MessageType
	if r.state == StatePreCandidate {
		myVoteRespType = pb.MsgPreVoteResp
	} else {
		myVoteRespType = pb.MsgVoteResp
	}
	switch m.Type {
	case pb.MsgProp:
		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
		return ErrProposalDropped
	case pb.MsgApp:
		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
		r.handleAppendEntries(m)
	case pb.MsgHeartbeat:
		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
		r.handleHeartbeat(m)
	case pb.MsgSnap:
		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
		r.handleSnapshot(m)
	case myVoteRespType:
		gr := r.poll(m.From, m.Type, !m.Reject)
		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
		switch r.quorum() {
		case gr:
			if r.state == StatePreCandidate {
				r.campaign(campaignElection)
			} else {
				r.becomeLeader()
				r.bcastAppend()
			}
		case len(r.votes) - gr:
			// pb.MsgPreVoteResp contains future term of pre-candidate
			// m.Term > r.Term; reuse r.Term
			r.becomeFollower(r.Term, None)
		}
	case pb.MsgTimeoutNow:
		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L63-L78
func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request {
	uu := u
	uu.Path = path
	req, err := http.NewRequest("POST", uu.String(), body)
	if err != nil {
		plog.Panicf("unexpected new request error (%v)", err)
	}
	req.Header.Set("Content-Type", ct)
	req.Header.Set("X-Server-From", from.String())
	req.Header.Set("X-Server-Version", version.Version)
	req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
	req.Header.Set("X-Etcd-Cluster-ID", cid.String())
	setPeerURLsHeader(req, urls)

	return req
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/double_barrier.go#L46-L75
func (b *DoubleBarrier) Enter() error {
	client := b.s.Client()
	ek, err := newUniqueEphemeralKey(b.s, b.key+"/waiters")
	if err != nil {
		return err
	}
	b.myKey = ek

	resp, err := client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix())
	if err != nil {
		return err
	}

	if len(resp.Kvs) > b.count {
		return ErrTooManyClients
	}

	if len(resp.Kvs) == b.count {
		// unblock waiters
		_, err = client.Put(b.ctx, b.key+"/ready", "")
		return err
	}

	_, err = WaitEvents(
		client,
		b.key+"/ready",
		ek.Revision(),
		[]mvccpb.Event_EventType{mvccpb.PUT})
	return err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/cluster_util.go#L299-L357
func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
	cc := &http.Client{
		Transport: rt,
	}
	var (
		err  error
		resp *http.Response
	)

	for _, u := range m.PeerURLs {
		addr := u + "/version"
		resp, err = cc.Get(addr)
		if err != nil {
			if lg != nil {
				lg.Warn(
					"failed to reach the peer URL",
					zap.String("address", addr),
					zap.String("remote-member-id", m.ID.String()),
					zap.Error(err),
				)
			} else {
				plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
			}
			continue
		}
		var b []byte
		b, err = ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			if lg != nil {
				lg.Warn(
					"failed to read body of response",
					zap.String("address", addr),
					zap.String("remote-member-id", m.ID.String()),
					zap.Error(err),
				)
			} else {
				plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
			}
			continue
		}
		var vers version.Versions
		if err = json.Unmarshal(b, &vers); err != nil {
			if lg != nil {
				lg.Warn(
					"failed to unmarshal response",
					zap.String("address", addr),
					zap.String("remote-member-id", m.ID.String()),
					zap.Error(err),
				)
			} else {
				plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
			}
			continue
		}
		return &vers, nil
	}
	return nil, err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/etcdhttp/base.go#L141-L203
func WriteError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
	if err == nil {
		return
	}
	switch e := err.(type) {
	case *v2error.Error:
		e.WriteTo(w)

	case *httptypes.HTTPError:
		if et := e.WriteTo(w); et != nil {
			if lg != nil {
				lg.Debug(
					"failed to write v2 HTTP error",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-server-error", e.Error()),
					zap.Error(et),
				)
			} else {
				plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
			}
		}

	default:
		switch err {
		case etcdserver.ErrTimeoutDueToLeaderFail,
			etcdserver.ErrTimeoutDueToConnectionLost,
			etcdserver.ErrNotEnoughStartedMembers,
			etcdserver.ErrUnhealthy:
			if lg != nil {
				lg.Warn(
					"v2 response error",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-server-error", err.Error()),
				)
			} else {
				mlog.MergeError(err)
			}

		default:
			if lg != nil {
				lg.Warn(
					"unexpected v2 response error",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-server-error", err.Error()),
				)
			} else {
				mlog.MergeErrorf("got unexpected response error (%v)", err)
			}
		}

		herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
		if et := herr.WriteTo(w); et != nil {
			if lg != nil {
				lg.Debug(
					"failed to write v2 HTTP error",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-server-error", err.Error()),
					zap.Error(et),
				)
			} else {
				plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
			}
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/selective_string.go#L46-L53
func (ss *SelectiveStringValue) Valids() []string {
	s := make([]string, 0, len(ss.valids))
	for k := range ss.valids {
		s = append(s, k)
	}
	sort.Strings(s)
	return s
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/value.go#L1010-L1020
func (vlog *valueLog) getFileRLocked(fid uint32) (*logFile, error) {
	vlog.filesLock.RLock()
	defer vlog.filesLock.RUnlock()
	ret, ok := vlog.filesMap[fid]
	if !ok {
		// log file has gone away, will need to retry the operation.
		return nil, ErrRetry
	}
	ret.lock.RLock()
	return ret, nil
}
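Note the ordering: the per-file read lock is taken while the map's read lock is still held, so the file cannot be deleted between lookup and lock acquisition, and the caller is then responsible for releasing ret.lock. A generic sketch of this hand-over-hand locking pattern (all names are illustrative):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errRetry = errors.New("entry has gone away, retry the operation")

type entry struct {
	lock sync.RWMutex
	data string
}

type registry struct {
	mu      sync.RWMutex
	entries map[uint32]*entry
}

// getRLocked returns the entry with its read lock already held. Taking
// entry.lock before releasing r.mu guarantees no deleter can free the
// entry in between. The caller must call entry.lock.RUnlock() when done.
func (r *registry) getRLocked(id uint32) (*entry, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	e, ok := r.entries[id]
	if !ok {
		return nil, errRetry
	}
	e.lock.RLock()
	return e, nil
}

func main() {
	r := &registry{entries: map[uint32]*entry{7: {data: "vlog-7"}}}
	e, err := r.getRLocked(7)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(e.data)
	e.lock.RUnlock() // caller releases the per-entry lock
}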
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L712-L722
func (r *Raft) RemovePeer(peer ServerAddress) Future {
	if r.protocolVersion > 2 {
		return errorFuture{ErrUnsupportedProtocol}
	}
	return r.requestConfigChange(configurationChangeRequest{
		command:   RemoveServer,
		serverID:  ServerID(peer),
		prevIndex: 0,
	}, 0)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/leader.go#L93-L101
func (l *leader) gotLeader() {
	l.mu.Lock()
	defer l.mu.Unlock()
	select {
	case <-l.leaderc:
		l.leaderc = make(chan struct{})
	default:
	}
}
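leaderc is a broadcast channel: watchers block on <-l.leaderc until loss of leadership closes it, and regaining leadership swaps in a fresh channel. The non-blocking select makes gotLeader idempotent, since an already-open channel is left alone. A self-contained sketch of the close-to-broadcast pattern (names are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

// notifier broadcasts "event happened" by closing a channel; closing wakes
// every waiter at once, and a fresh channel re-arms the notifier.
type notifier struct {
	mu sync.Mutex
	ch chan struct{}
}

func newNotifier() *notifier {
	return &notifier{ch: make(chan struct{})}
}

// wait returns the current channel; receive on it to block until fire.
func (n *notifier) wait() <-chan struct{} {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.ch
}

// fire closes the current channel, waking all waiters; safe to call twice.
func (n *notifier) fire() {
	n.mu.Lock()
	defer n.mu.Unlock()
	select {
	case <-n.ch: // already closed; don't close twice
	default:
		close(n.ch)
	}
}

// rearm installs a fresh channel, but only if the old one was fired;
// the non-blocking select makes repeated calls harmless, as in gotLeader.
func (n *notifier) rearm() {
	n.mu.Lock()
	defer n.mu.Unlock()
	select {
	case <-n.ch:
		n.ch = make(chan struct{})
	default:
	}
}

func main() {
	n := newNotifier()
	go func() {
		<-n.wait()
		fmt.Println("lost leadership")
	}()
	time.Sleep(10 * time.Millisecond)
	n.fire()  // wake the watcher
	n.rearm() // ready for the next leadership cycle
	time.Sleep(10 * time.Millisecond)
}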
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/get_command.go#L27-L41
func NewGetCommand() cli.Command {
	return cli.Command{
		Name:      "get",
		Usage:     "retrieve the value of a key",
		ArgsUsage: "<key>",
		Flags: []cli.Flag{
			cli.BoolFlag{Name: "sort", Usage: "returns result in sorted order"},
			cli.BoolFlag{Name: "quorum, q", Usage: "require quorum for get request"},
		},
		Action: func(c *cli.Context) error {
			getCommandFunc(c, mustNewKeyAPI(c))
			return nil
		},
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/raft.go#L644-L678
func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			if lg != nil {
				lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
			} else {
				plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
			}
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}
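Replaying the conf changes over a map-used-as-set and then sorting the surviving keys yields a deterministic membership list regardless of the order entries arrived. A minimal sketch of that set-replay idiom (addNode/removeNode stand in for the raftpb conf-change types):

package main

import (
	"fmt"
	"sort"
)

type opType int

const (
	addNode opType = iota
	removeNode
)

type confChange struct {
	op opType
	id uint64
}

// memberIDs replays conf changes over an initial membership set and
// returns the surviving IDs in ascending order.
func memberIDs(initial []uint64, changes []confChange) []uint64 {
	ids := make(map[uint64]bool)
	for _, id := range initial {
		ids[id] = true
	}
	for _, cc := range changes {
		switch cc.op {
		case addNode:
			ids[cc.id] = true
		case removeNode:
			delete(ids, cc.id)
		}
	}
	out := make([]uint64, 0, len(ids))
	for id := range ids {
		out = append(out, id)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	changes := []confChange{{addNode, 3}, {addNode, 1}, {removeNode, 2}}
	fmt.Println(memberIDs([]uint64{1, 2}, changes)) // [1 3]
}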
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L644-L668
func (s *store) internalGet(nodePath string) (*node, *v2error.Error) {
	nodePath = path.Clean(path.Join("/", nodePath))

	walkFunc := func(parent *node, name string) (*node, *v2error.Error) {
		if !parent.IsDir() {
			err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex)
			return nil, err
		}

		child, ok := parent.Children[name]
		if ok {
			return child, nil
		}

		return nil, v2error.NewError(v2error.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex)
	}

	f, err := s.walk(nodePath, walkFunc)
	if err != nil {
		return nil, err
	}
	return f, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/election.go#L132-L145
func (e *Election) Resign(ctx context.Context) (err error) {
	if e.leaderSession == nil {
		return nil
	}
	client := e.session.Client()
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
	if err == nil {
		e.hdr = resp.Header
	}
	e.leaderKey = ""
	e.leaderSession = nil
	return err
}
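Resign deletes the leader key only if its create revision still matches the one recorded at election time, so a stale resignation cannot delete a key it no longer owns. A typical campaign/resign round-trip with the same concurrency package (endpoint, key prefix, and import paths are illustrative and vary by etcd release):

package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/elections/demo")
	ctx := context.Background()

	// Campaign blocks until this candidate becomes leader.
	if err := e.Campaign(ctx, "node-1"); err != nil {
		log.Fatal(err)
	}
	log.Println("elected; doing leader work")

	// Resign deletes our leader key, letting the next candidate in.
	if err := e.Resign(ctx); err != nil {
		log.Fatal(err)
	}
}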
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/table.go#L192-L198
func (t *Table) Close() error {
	if t.loadingMode == options.MemoryMap {
		y.Munmap(t.mmap)
	}

	return t.fd.Close()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/picker/roundrobin_balanced.go#L54-L92
func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	rb.mu.RLock()
	n := len(rb.scs)
	rb.mu.RUnlock()
	if n == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}

	rb.mu.Lock()
	cur := rb.next
	sc := rb.scs[cur]
	picked := rb.scToAddr[sc].Addr
	rb.next = (rb.next + 1) % len(rb.scs)
	rb.mu.Unlock()

	rb.lg.Debug(
		"picked",
		zap.String("address", picked),
		zap.Int("subconn-index", cur),
		zap.Int("subconn-size", n),
	)

	doneFunc := func(info balancer.DoneInfo) {
		// TODO: error handling?
		fss := []zapcore.Field{
			zap.Error(info.Err),
			zap.String("address", picked),
			zap.Bool("success", info.Err == nil),
			zap.Bool("bytes-sent", info.BytesSent),
			zap.Bool("bytes-received", info.BytesReceived),
		}
		if info.Err == nil {
			rb.lg.Debug("balancer done", fss...)
		} else {
			rb.lg.Warn("balancer failed", fss...)
		}
	}
	return sc, doneFunc, nil
}
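The picker reads the pool size under a read lock, advances the cursor under a full lock, and hands back a doneFunc so the caller can report the RPC's outcome to the balancer. A dependency-free sketch of the round-robin core without the gRPC plumbing (all names are illustrative):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNoConnAvailable = errors.New("no connection available")

// rrPicker cycles through a fixed set of addresses, one per Pick call.
type rrPicker struct {
	mu    sync.Mutex
	next  int
	addrs []string
}

// Pick returns the next address and a done callback for reporting the
// outcome of the call that used it.
func (p *rrPicker) Pick() (string, func(err error), error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.addrs) == 0 {
		return "", nil, errNoConnAvailable
	}
	addr := p.addrs[p.next]
	p.next = (p.next + 1) % len(p.addrs)
	done := func(err error) {
		if err != nil {
			fmt.Printf("call to %s failed: %v\n", addr, err)
		}
	}
	return addr, done, nil
}

func main() {
	p := &rrPicker{addrs: []string{"a:2379", "b:2379", "c:2379"}}
	for i := 0; i < 5; i++ {
		addr, done, _ := p.Pick()
		fmt.Println(addr) // a, b, c, a, b
		done(nil)
	}
}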
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/iterator.go#L430-L437
func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator {
	if len(opt.Prefix) > 0 {
		panic("opt.Prefix should be nil for NewKeyIterator.")
	}
	opt.Prefix = key // This key must be without the timestamp.
	opt.prefixIsKey = true
	return txn.NewIterator(opt)
}
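Because opt.prefixIsKey restricts the iterator to exactly one key, the typical use is walking a key's version history. A usage sketch against a real Badger DB, assuming a release where badger.DefaultOptions takes a directory path (older releases expose it as a struct with Dir and ValueDir fields); path and key are illustrative:

package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-demo"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(txn *badger.Txn) error {
		opt := badger.DefaultIteratorOptions
		opt.AllVersions = true // visit every stored version of the key
		it := txn.NewKeyIterator([]byte("answer"), opt)
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			item := it.Item()
			val, err := item.ValueCopy(nil)
			if err != nil {
				return err
			}
			fmt.Printf("version %d: %s\n", item.Version(), val)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}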
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L171-L183
func (n *node) GetChild(name string) (*node, *v2error.Error) {
	if !n.IsDir() {
		return nil, v2error.NewError(v2error.EcodeNotDir, n.Path, n.store.CurrentIndex)
	}

	child, ok := n.Children[name]
	if ok {
		return child, nil
	}

	return nil, nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/iterator.go#L249-L254
func (s *MergeIterator) Seek(key []byte) {
	for _, itr := range s.all {
		itr.Seek(key)
	}
	s.initHeap()
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/manifest.go#L184-L217
func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error {
	changes := pb.ManifestChangeSet{Changes: changesParam}
	buf, err := changes.Marshal()
	if err != nil {
		return err
	}

	// Maybe we could use O_APPEND instead (on certain file systems)
	mf.appendLock.Lock()
	if err := applyChangeSet(&mf.manifest, &changes); err != nil {
		mf.appendLock.Unlock()
		return err
	}
	// Rewrite manifest if it'd shrink by 1/10 and it's big enough to care
	if mf.manifest.Deletions > mf.deletionsRewriteThreshold &&
		mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) {
		if err := mf.rewrite(); err != nil {
			mf.appendLock.Unlock()
			return err
		}
	} else {
		var lenCrcBuf [8]byte
		binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf)))
		binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable))
		buf = append(lenCrcBuf[:], buf...)
		if _, err := mf.fp.Write(buf); err != nil {
			mf.appendLock.Unlock()
			return err
		}
	}

	mf.appendLock.Unlock()
	return mf.fp.Sync()
}
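Each appended record is framed as a 4-byte big-endian length plus a 4-byte Castagnoli CRC of the payload, so a reader replaying the manifest can detect truncated or corrupt tails. A standalone sketch of that framing using only the standard library; the replay side is the mirror image:

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
)

// castagnoli matches y.CastagnoliCrcTable in badger.
var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// writeRecord frames payload as [len:4][crc:4][payload].
func writeRecord(w io.Writer, payload []byte) error {
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload)))
	binary.BigEndian.PutUint32(hdr[4:8], crc32.Checksum(payload, castagnoli))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readRecord reads one frame and verifies its checksum.
func readRecord(r io.Reader) ([]byte, error) {
	var hdr [8]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, err
	}
	payload := make([]byte, binary.BigEndian.Uint32(hdr[0:4]))
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	if crc32.Checksum(payload, castagnoli) != binary.BigEndian.Uint32(hdr[4:8]) {
		return nil, errors.New("checksum mismatch: corrupt or truncated record")
	}
	return payload, nil
}

func main() {
	var buf bytes.Buffer
	if err := writeRecord(&buf, []byte("manifest change")); err != nil {
		panic(err)
	}
	rec, err := readRecord(&buf)
	fmt.Printf("%s %v\n", rec, err)
}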
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/client/keys.go#L475-L487
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
	// We concatenate all parts together manually. We cannot use
	// path.Join because it does not preserve a trailing slash.
	// We call CanonicalURLPath to further clean up the path.
	if prefix != "" && prefix[0] != '/' {
		prefix = "/" + prefix
	}
	if key != "" && key[0] != '/' {
		key = "/" + key
	}
	ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
	return &ep
}
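The manual concatenation matters because a v2 directory key like /dir/ is distinct from /dir, and path.Join strips the trailing slash. A quick sketch of the difference; urlPath below is a simplified illustrative stand-in for pathutil.CanonicalURLPath:

package main

import (
	"fmt"
	"path"
	"strings"
)

// urlPath cleans the path but keeps a trailing slash if the input had one,
// which is the property path.Join lacks.
func urlPath(p string) string {
	if p == "" {
		return "/"
	}
	if p[0] != '/' {
		p = "/" + p
	}
	cleaned := path.Clean(p)
	if cleaned != "/" && strings.HasSuffix(p, "/") {
		cleaned += "/"
	}
	return cleaned
}

func main() {
	// path.Join drops the trailing slash that distinguishes a v2 directory.
	fmt.Println(path.Join("/v2/keys", "dir/")) // /v2/keys/dir
	// Manual concatenation plus canonicalization keeps it.
	fmt.Println(urlPath("/v2/keys" + "/dir/")) // /v2/keys/dir/
}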
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/apply.go#L381-L409
func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
	reqs := rt.Success
	if !txnPath[0] {
		reqs = rt.Failure
	}
	resps := make([]*pb.ResponseOp, len(reqs))
	txnResp = &pb.TxnResponse{
		Responses: resps,
		Succeeded: txnPath[0],
		Header:    &pb.ResponseHeader{},
	}
	for i, req := range reqs {
		switch tv := req.Request.(type) {
		case *pb.RequestOp_RequestRange:
			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
		case *pb.RequestOp_RequestPut:
			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
		case *pb.RequestOp_RequestDeleteRange:
			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
		case *pb.RequestOp_RequestTxn:
			resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
			resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
			txnPath = txnPath[1+txns:]
			txnCount += txns + 1
		default:
		}
	}
	return txnResp, txnCount
}
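txnPath is a flat pre-order slice of branch decisions; each nested txn consumes one entry for itself plus one per txn inside it, which is why the recursion returns txnCount and the caller advances the slice by 1+txns. A toy sketch of the same flat-path recursion over a simplified txn tree (all types here are illustrative, not etcd's protobufs):

package main

import "fmt"

// txn is a simplified transaction: success/failure branches hold ops,
// where a nil element is a leaf op and a non-nil one is a nested txn.
type txn struct {
	success []*txn
	failure []*txn
}

// countResps walks the branch chosen by path[0], consuming one path entry
// per txn, and returns the response count plus how many nested txns
// (this one excluded) were consumed.
func countResps(t *txn, path []bool) (resps, txns int) {
	ops := t.success
	if !path[0] {
		ops = t.failure
	}
	path = path[1:]
	for _, op := range ops {
		if op == nil {
			resps++ // leaf op: one response, no path entry
			continue
		}
		r, n := countResps(op, path)
		resps += r + 1        // nested txn also occupies one response slot
		path = path[n+1:]     // skip its own entry plus its inner txns
		txns += n + 1
	}
	return resps, txns
}

func main() {
	// A txn whose success branch holds two leaf ops and one nested txn
	// with a single leaf op on its failure branch.
	inner := &txn{success: []*txn{nil}, failure: []*txn{nil}}
	root := &txn{success: []*txn{nil, inner, nil}}
	// Path: root succeeded, inner failed.
	resps, txns := countResps(root, []bool{true, false})
	fmt.Println(resps, txns) // 4 responses total, 1 nested txn
}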