https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/levels.go#L369-L371
func (s *levelsController) isLevel0Compactable() bool {
    return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go#L94-L155
func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
    mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
    })

    mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
    })

    return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/stm.go#L237-L243
func (ws writeSet) puts() []v3.Op {
    puts := make([]v3.Op, 0, len(ws))
    for _, v := range ws {
        puts = append(puts, v.op)
    }
    return puts
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L726-L736
func (db *DB) View(fn func(txn *Txn) error) error {
    var txn *Txn
    if db.opt.managedTxns {
        txn = db.NewTransactionAt(math.MaxUint64, false)
    } else {
        txn = db.NewTransaction(false)
    }
    defer txn.Discard()

    return fn(txn)
}
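For orientation, a minimal caller-side sketch (not from the repository; the key name and helper function are hypothetical, and the badger and fmt imports are assumed) of how View wraps a read-only transaction:

func readAnswer(db *badger.DB) error {
    return db.View(func(txn *badger.Txn) error {
        item, err := txn.Get([]byte("answer")) // hypothetical key
        if err != nil {
            return err
        }
        // Value hands the stored bytes to a callback that is only
        // valid for the duration of the transaction.
        return item.Value(func(val []byte) error {
            fmt.Printf("answer=%s\n", val)
            return nil
        })
    })
}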
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/default_context.go#L189-L221
func (d *DefaultContext) Redirect(status int, url string, args ...interface{}) error {
    d.Flash().persist(d.Session())

    if strings.HasSuffix(url, "Path()") {
        if len(args) > 1 {
            return fmt.Errorf("you must pass only a map[string]interface{} to a route path: %T", args)
        }
        var m map[string]interface{}
        if len(args) == 1 {
            rv := reflect.Indirect(reflect.ValueOf(args[0]))
            if !rv.Type().ConvertibleTo(mapType) {
                return fmt.Errorf("you must pass only a map[string]interface{} to a route path: %T", args)
            }
            m = rv.Convert(mapType).Interface().(map[string]interface{})
        }
        h, ok := d.Value(strings.TrimSuffix(url, "()")).(RouteHelperFunc)
        if !ok {
            return fmt.Errorf("could not find a route helper named %s", url)
        }
        url, err := h(m)
        if err != nil {
            return err
        }
        http.Redirect(d.Response(), d.Request(), string(url), status)
        return nil
    }
    if len(args) > 0 {
        url = fmt.Sprintf(url, args...)
    }
    http.Redirect(d.Response(), d.Request(), url, status)
    return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/commitment.go#L85-L101
func (c *commitment) recalculate() {
    if len(c.matchIndexes) == 0 {
        return
    }

    matched := make([]uint64, 0, len(c.matchIndexes))
    for _, idx := range c.matchIndexes {
        matched = append(matched, idx)
    }
    sort.Sort(uint64Slice(matched))
    quorumMatchIndex := matched[(len(matched)-1)/2]

    if quorumMatchIndex > c.commitIndex && quorumMatchIndex >= c.startIndex {
        c.commitIndex = quorumMatchIndex
        asyncNotifyCh(c.commitCh)
    }
}
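To see why the sorted middle element is the quorum commit index: with three voters whose match indexes are 4, 9, and 10, the ascending slice is [4 9 10] and index (3-1)/2 = 1 selects 9. A majority (two of three servers) have replicated everything up to index 9, so it is safe to commit, while index 10 has only reached a single server.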
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L347-L361
func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
    if lo > hi {
        l.logger.Panicf("invalid slice %d > %d", lo, hi)
    }

    fi := l.firstIndex()
    if lo < fi {
        return ErrCompacted
    }

    length := l.lastIndex() + 1 - fi
    if lo < fi || hi > fi+length {
        l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
    }
    return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/iterator.go#L496-L511
func (it *Iterator) Next() {
    // Reuse current item
    it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting.
    it.waste.push(it.item)

    // Set next item to current
    it.item = it.data.pop()

    for it.iitr.Valid() {
        if it.parseItem() {
            // parseItem calls one extra next.
            // This is used to deal with the complexity of reverse iteration.
            break
        }
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L846-L863
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
    if s.cluster.IsIDRemoved(types.ID(m.From)) {
        if lg := s.getLogger(); lg != nil {
            lg.Warn(
                "rejected Raft message from removed member",
                zap.String("local-member-id", s.ID().String()),
                zap.String("removed-member-id", types.ID(m.From).String()),
            )
        } else {
            plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
        }
        return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
    }
    if m.Type == raftpb.MsgApp {
        s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
    }
    return s.r.Step(ctx, m)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/op.go#L293-L295
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
    return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
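A hedged usage sketch (the key is hypothetical; a connected clientv3.Client and the context import are assumed): because OpTxn packages an entire transaction as a single Op, it can be submitted with Do or nested inside another transaction's branches:

func putIfAbsent(ctx context.Context, cli *clientv3.Client) error {
    op := clientv3.OpTxn(
        []clientv3.Cmp{clientv3.Compare(clientv3.Version("k1"), "=", 0)},
        []clientv3.Op{clientv3.OpPut("k1", "created")}, // then: key absent
        []clientv3.Op{clientv3.OpPut("k1", "updated")}, // else: key exists
    )
    _, err := cli.Do(ctx, op)
    return err
}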
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/cluster_util.go#L58-L60
func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
    return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/contention/contention.go#L36-L41
func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
    return &TimeoutDetector{
        maxDuration: maxDuration,
        records:     make(map[uint64]time.Time),
    }
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/configuration.go#L190-L275
func nextConfiguration(current Configuration, currentIndex uint64, change configurationChangeRequest) (Configuration, error) {
    if change.prevIndex > 0 && change.prevIndex != currentIndex {
        return Configuration{}, fmt.Errorf("Configuration changed since %v (latest is %v)", change.prevIndex, currentIndex)
    }

    configuration := current.Clone()
    switch change.command {
    case AddStaging:
        // TODO: barf on new address?
        newServer := Server{
            // TODO: This should add the server as Staging, to be automatically
            // promoted to Voter later. However, the promotion to Voter is not yet
            // implemented, and doing so is not trivial with the way the leader loop
            // coordinates with the replication goroutines today. So, for now, the
            // server will have a vote right away, and the Promote case below is
            // unused.
            Suffrage: Voter,
            ID:       change.serverID,
            Address:  change.serverAddress,
        }
        found := false
        for i, server := range configuration.Servers {
            if server.ID == change.serverID {
                if server.Suffrage == Voter {
                    configuration.Servers[i].Address = change.serverAddress
                } else {
                    configuration.Servers[i] = newServer
                }
                found = true
                break
            }
        }
        if !found {
            configuration.Servers = append(configuration.Servers, newServer)
        }
    case AddNonvoter:
        newServer := Server{
            Suffrage: Nonvoter,
            ID:       change.serverID,
            Address:  change.serverAddress,
        }
        found := false
        for i, server := range configuration.Servers {
            if server.ID == change.serverID {
                if server.Suffrage != Nonvoter {
                    configuration.Servers[i].Address = change.serverAddress
                } else {
                    configuration.Servers[i] = newServer
                }
                found = true
                break
            }
        }
        if !found {
            configuration.Servers = append(configuration.Servers, newServer)
        }
    case DemoteVoter:
        for i, server := range configuration.Servers {
            if server.ID == change.serverID {
                configuration.Servers[i].Suffrage = Nonvoter
                break
            }
        }
    case RemoveServer:
        for i, server := range configuration.Servers {
            if server.ID == change.serverID {
                configuration.Servers = append(configuration.Servers[:i], configuration.Servers[i+1:]...)
                break
            }
        }
    case Promote:
        for i, server := range configuration.Servers {
            if server.ID == change.serverID && server.Suffrage == Staging {
                configuration.Servers[i].Suffrage = Voter
                break
            }
        }
    }

    // Make sure we didn't do something bad like remove the last voter
    if err := checkConfiguration(configuration); err != nil {
        return Configuration{}, err
    }

    return configuration, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/fuzzy/transport.go#L210-L212
func (t *transport) RequestVote(id raft.ServerID, target raft.ServerAddress, args *raft.RequestVoteRequest, resp *raft.RequestVoteResponse) error {
    return t.sendRPC(string(target), args, resp)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/watcher_group.go#L223-L236
func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
    if len(wg.watchers) < maxWatchers {
        return wg, wg.chooseAll(curRev, compactRev)
    }
    ret := newWatcherGroup()
    for w := range wg.watchers {
        if maxWatchers <= 0 {
            break
        }
        maxWatchers--
        ret.add(w)
    }
    return &ret, ret.chooseAll(curRev, compactRev)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2stats/server.go#L91-L110
func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
    ss.Lock()
    defer ss.Unlock()

    now := time.Now()

    ss.State = raft.StateFollower
    if leader != ss.LeaderInfo.Name {
        ss.LeaderInfo.Name = leader
        ss.LeaderInfo.StartTime = now
    }

    ss.recvRateQueue.Insert(
        &RequestStats{
            SendingTime: now,
            Size:        reqSize,
        },
    )
    ss.RecvAppendRequestCnt++
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/move_leader_command.go#L26-L33
func NewMoveLeaderCommand() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "move-leader <transferee-member-id>",
        Short: "Transfers leadership to another etcd cluster member.",
        Run:   transferLeadershipCommandFunc,
    }
    return cmd
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/set_command.go#L51-L73
func setCommandFunc(c *cli.Context, ki client.KeysAPI) {
    if len(c.Args()) == 0 {
        handleError(c, ExitBadArgs, errors.New("key required"))
    }
    key := c.Args()[0]
    value, err := argOrStdin(c.Args(), os.Stdin, 1)
    if err != nil {
        handleError(c, ExitBadArgs, errors.New("value required"))
    }

    ttl := c.Int("ttl")
    prevValue := c.String("swap-with-value")
    prevIndex := c.Int("swap-with-index")

    ctx, cancel := contextWithTotalTimeout(c)
    resp, err := ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevIndex: uint64(prevIndex), PrevValue: prevValue})
    cancel()
    if err != nil {
        handleError(c, ExitServerError, err)
    }

    printResponseKey(resp, c.GlobalString("output"))
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L798-L811
func (r *Raft) Shutdown() Future {
    r.shutdownLock.Lock()
    defer r.shutdownLock.Unlock()

    if !r.shutdown {
        close(r.shutdownCh)
        r.shutdown = true
        r.setState(Shutdown)
        return &shutdownFuture{r}
    }

    // avoid closing transport twice
    return &shutdownFuture{nil}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L220-L232
func (m *Member) IsLeader() (bool, error) {
    cli, err := m.CreateEtcdClient()
    if err != nil {
        return false, fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
    }
    defer cli.Close()

    resp, err := cli.Status(context.Background(), m.EtcdClientEndpoint)
    if err != nil {
        return false, err
    }
    return resp.Header.MemberId == resp.Leader, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L1737-L1774
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
    cc.ID = s.reqIDGen.Next()
    ch := s.w.Register(cc.ID)

    start := time.Now()
    if err := s.r.ProposeConfChange(ctx, cc); err != nil {
        s.w.Trigger(cc.ID, nil)
        return nil, err
    }

    select {
    case x := <-ch:
        if x == nil {
            if lg := s.getLogger(); lg != nil {
                lg.Panic("failed to configure")
            } else {
                plog.Panicf("configure trigger value should never be nil")
            }
        }
        resp := x.(*confChangeResponse)
        if lg := s.getLogger(); lg != nil {
            lg.Info(
                "applied a configuration change through raft",
                zap.String("local-member-id", s.ID().String()),
                zap.String("raft-conf-change", cc.Type.String()),
                zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
            )
        }
        return resp.membs, resp.err

    case <-ctx.Done():
        s.w.Trigger(cc.ID, nil) // GC wait
        return nil, s.parseProposeCtxErr(ctx.Err(), start)

    case <-s.stopping:
        return nil, ErrStopped
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/op.go#L264-L290
func OpPut(key, val string, opts ...OpOption) Op {
    ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
    ret.applyOpts(opts)
    switch {
    case ret.end != nil:
        panic("unexpected range in put")
    case ret.limit != 0:
        panic("unexpected limit in put")
    case ret.rev != 0:
        panic("unexpected revision in put")
    case ret.sort != nil:
        panic("unexpected sort in put")
    case ret.serializable:
        panic("unexpected serializable in put")
    case ret.countOnly:
        panic("unexpected countOnly in put")
    case ret.minModRev != 0, ret.maxModRev != 0:
        panic("unexpected mod revision filter in put")
    case ret.minCreateRev != 0, ret.maxCreateRev != 0:
        panic("unexpected create revision filter in put")
    case ret.filterDelete, ret.filterPut:
        panic("unexpected filter in put")
    case ret.createdNotify:
        panic("unexpected createdNotify in put")
    }
    return ret
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/config.go#L619-L665
func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
    token = cfg.InitialClusterToken
    switch {
    case cfg.Durl != "":
        urlsmap = types.URLsMap{}
        // If using discovery, generate a temporary cluster based on
        // self's advertised peer URLs
        urlsmap[cfg.Name] = cfg.APUrls
        token = cfg.Durl

    case cfg.DNSCluster != "":
        clusterStrs, cerr := cfg.GetDNSClusterNames()
        lg := cfg.logger
        if cerr != nil {
            if lg != nil {
                lg.Warn("failed to resolve during SRV discovery", zap.Error(cerr))
            } else {
                plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
            }
            return nil, "", cerr
        }
        for _, s := range clusterStrs {
            if lg != nil {
                lg.Info("got bootstrap from DNS for etcd-server", zap.String("node", s))
            } else {
                plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
            }
        }
        clusterStr := strings.Join(clusterStrs, ",")
        if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" {
            cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
        }
        urlsmap, err = types.NewURLsMap(clusterStr)
        // only etcd member must belong to the discovered cluster.
        // proxy does not need to belong to the discovered cluster.
        if which == "etcd" {
            if _, ok := urlsmap[cfg.Name]; !ok {
                return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
            }
        }

    default:
        // We're statically configured, and cluster has appropriately been set.
        urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
    }
    return urlsmap, token, err
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L117-L122
func ParseTs(key []byte) uint64 {
    if len(key) <= 8 {
        return 0
    }
    return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
}
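ParseTs is the inverse of y.KeyWithTs, which appends math.MaxUint64-ts as 8 big-endian bytes so that newer versions of a key sort before older ones. A small round-trip sketch (the key and timestamp are illustrative; the y package and fmt are assumed):

key := y.KeyWithTs([]byte("user1"), 42)
fmt.Println(y.ParseTs(key))          // 42
fmt.Println(string(y.ParseKey(key))) // "user1" (ParseKey strips the 8-byte suffix)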
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/error.go#L60-L64
func AssertTruef(b bool, format string, args ...interface{}) {
    if !b {
        log.Fatalf("%+v", errors.Errorf(format, args...))
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/etcd_config.go#L67-L107
func (e *Etcd) Flags() (fs []string) {
    tp := reflect.TypeOf(*e)
    vo := reflect.ValueOf(*e)
    for _, name := range etcdFields {
        field, ok := tp.FieldByName(name)
        if !ok {
            panic(fmt.Errorf("field %q not found", name))
        }
        fv := reflect.Indirect(vo).FieldByName(name)
        var sv string
        switch fv.Type().Kind() {
        case reflect.String:
            sv = fv.String()
        case reflect.Slice:
            n := fv.Len()
            sl := make([]string, n)
            for i := 0; i < n; i++ {
                sl[i] = fv.Index(i).String()
            }
            sv = strings.Join(sl, ",")
        case reflect.Int64:
            sv = fmt.Sprintf("%d", fv.Int())
        case reflect.Bool:
            sv = fmt.Sprintf("%v", fv.Bool())
        default:
            panic(fmt.Errorf("field %q (%v) cannot be parsed", name, fv.Type().Kind()))
        }

        fname := field.Tag.Get("yaml")

        // TODO: remove this
        if fname == "initial-corrupt-check" {
            fname = "experimental-" + fname
        }

        if sv != "" {
            fs = append(fs, fmt.Sprintf("--%s=%s", fname, sv))
        }
    }
    return fs
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/lease/leasehttp/http.go#L40-L42
func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
    return &leaseHandler{l, waitch}
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_store.go#L79-L96
func (i *InmemStore) DeleteRange(min, max uint64) error {
    i.l.Lock()
    defer i.l.Unlock()
    for j := min; j <= max; j++ {
        delete(i.logs, j)
    }
    if min <= i.lowIndex {
        i.lowIndex = max + 1
    }
    if max >= i.highIndex {
        i.highIndex = min - 1
    }
    if i.lowIndex > i.highIndex {
        i.lowIndex = 0
        i.highIndex = 0
    }
    return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L726-L750
func NewClientV3(m *member) (*clientv3.Client, error) {
    if m.grpcAddr == "" {
        return nil, fmt.Errorf("member not configured for grpc")
    }

    cfg := clientv3.Config{
        Endpoints:          []string{m.grpcAddr},
        DialTimeout:        5 * time.Second,
        DialOptions:        []grpc.DialOption{grpc.WithBlock()},
        MaxCallSendMsgSize: m.clientMaxCallSendMsgSize,
        MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize,
    }

    if m.ClientTLSInfo != nil {
        tls, err := m.ClientTLSInfo.ClientConfig()
        if err != nil {
            return nil, err
        }
        cfg.TLS = tls
    }
    if m.DialOptions != nil {
        cfg.DialOptions = append(cfg.DialOptions, m.DialOptions...)
    }
    return newClientV3(cfg)
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/session.go#L48-L52
func (s *Session) Clear() {
    for k := range s.Session.Values {
        s.Delete(k)
    }
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L1149-L1161
func (r *Raft) processConfigurationLogEntry(entry *Log) {
    if entry.Type == LogConfiguration {
        r.configurations.committed = r.configurations.latest
        r.configurations.committedIndex = r.configurations.latestIndex
        r.configurations.latest = decodeConfiguration(entry.Data)
        r.configurations.latestIndex = entry.Index
    } else if entry.Type == LogAddPeerDeprecated || entry.Type == LogRemovePeerDeprecated {
        r.configurations.committed = r.configurations.latest
        r.configurations.committedIndex = r.configurations.latestIndex
        r.configurations.latest = decodePeers(entry.Data, r.trans)
        r.configurations.latestIndex = entry.Index
    }
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/file_snapshot.go#L136-L140
func snapshotName(term, index uint64) string {
    now := time.Now()
    msec := now.UnixNano() / int64(time.Millisecond)
    return fmt.Sprintf("%d-%d-%d", term, index, msec)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/read_only.go#L64-L73
func (ro *readOnly) recvAck(m pb.Message) int {
    rs, ok := ro.pendingReadIndex[string(m.Context)]
    if !ok {
        return 0
    }

    rs.acks[m.From] = struct{}{}
    // add one to include an ack from local node
    return len(rs.acks) + 1
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/serve.go#L85-L202
func (sctx *serveCtx) serve(
    s *etcdserver.EtcdServer,
    tlsinfo *transport.TLSInfo,
    handler http.Handler,
    errHandler func(error),
    gopts ...grpc.ServerOption) (err error) {
    logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
    <-s.ReadyNotify()

    if sctx.lg == nil {
        plog.Info("ready to serve client requests")
    }

    m := cmux.New(sctx.l)
    v3c := v3client.New(s)
    servElection := v3election.NewElectionServer(v3c)
    servLock := v3lock.NewLockServer(v3c)

    var gs *grpc.Server
    defer func() {
        if err != nil && gs != nil {
            gs.Stop()
        }
    }()

    if sctx.insecure {
        gs = v3rpc.Server(s, nil, gopts...)
        v3electionpb.RegisterElectionServer(gs, servElection)
        v3lockpb.RegisterLockServer(gs, servLock)
        if sctx.serviceRegister != nil {
            sctx.serviceRegister(gs)
        }
        grpcl := m.Match(cmux.HTTP2())
        go func() { errHandler(gs.Serve(grpcl)) }()

        var gwmux *gw.ServeMux
        if s.Cfg.EnableGRPCGateway {
            gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
            if err != nil {
                return err
            }
        }

        httpmux := sctx.createMux(gwmux, handler)

        srvhttp := &http.Server{
            Handler:  createAccessController(sctx.lg, s, httpmux),
            ErrorLog: logger, // do not log user error
        }
        httpl := m.Match(cmux.HTTP1())
        go func() { errHandler(srvhttp.Serve(httpl)) }()

        sctx.serversC <- &servers{grpc: gs, http: srvhttp}
        if sctx.lg != nil {
            sctx.lg.Info(
                "serving client traffic insecurely; this is strongly discouraged!",
                zap.String("address", sctx.l.Addr().String()),
            )
        } else {
            plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
        }
    }

    if sctx.secure {
        tlscfg, tlsErr := tlsinfo.ServerConfig()
        if tlsErr != nil {
            return tlsErr
        }
        gs = v3rpc.Server(s, tlscfg, gopts...)
        v3electionpb.RegisterElectionServer(gs, servElection)
        v3lockpb.RegisterLockServer(gs, servLock)
        if sctx.serviceRegister != nil {
            sctx.serviceRegister(gs)
        }
        handler = grpcHandlerFunc(gs, handler)

        var gwmux *gw.ServeMux
        if s.Cfg.EnableGRPCGateway {
            dtls := tlscfg.Clone()
            // trust local server
            dtls.InsecureSkipVerify = true
            creds := credentials.NewTLS(dtls)
            opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
            gwmux, err = sctx.registerGateway(opts)
            if err != nil {
                return err
            }
        }

        var tlsl net.Listener
        tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
        if err != nil {
            return err
        }
        // TODO: add debug flag; enable logging when debug flag is set
        httpmux := sctx.createMux(gwmux, handler)

        srv := &http.Server{
            Handler:   createAccessController(sctx.lg, s, httpmux),
            TLSConfig: tlscfg,
            ErrorLog:  logger, // do not log user error
        }
        go func() { errHandler(srv.Serve(tlsl)) }()

        sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
        if sctx.lg != nil {
            sctx.lg.Info(
                "serving client traffic securely",
                zap.String("address", sctx.l.Addr().String()),
            )
        } else {
            plog.Infof("serving client requests on %s", sctx.l.Addr().String())
        }
    }

    close(sctx.serversC)
    return m.Serve()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/fileutil/fileutil.go#L85-L104
func ZeroToEnd(f *os.File) error {
    // TODO: support FALLOC_FL_ZERO_RANGE
    off, err := f.Seek(0, io.SeekCurrent)
    if err != nil {
        return err
    }
    lenf, lerr := f.Seek(0, io.SeekEnd)
    if lerr != nil {
        return lerr
    }
    if err = f.Truncate(off); err != nil {
        return err
    }
    // make sure blocks remain allocated
    if err = Preallocate(f, lenf, true); err != nil {
        return err
    }
    _, err = f.Seek(off, io.SeekStart)
    return err
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L991-L1009
func (r *Raft) processHeartbeat(rpc RPC) {
    defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now())

    // Check if we are shutdown, just ignore the RPC
    select {
    case <-r.shutdownCh:
        return
    default:
    }

    // Ensure we are only handling a heartbeat
    switch cmd := rpc.Command.(type) {
    case *AppendEntriesRequest:
        r.appendEntries(rpc, cmd)
    default:
        r.logger.Error(fmt.Sprintf("Expected heartbeat, got command: %#v", rpc.Command))
        rpc.Respond(nil, fmt.Errorf("unexpected command"))
    }
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/watermark.go#L78-L81
func (w *WaterMark) Begin(index uint64) {
    atomic.StoreUint64(&w.lastIndex, index)
    w.markCh <- mark{index: index, done: false}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L1384-L1421
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
    now := time.Now()
    interval := time.Duration(s.Cfg.TickMs) * time.Millisecond

    if lg := s.getLogger(); lg != nil {
        lg.Info(
            "leadership transfer starting",
            zap.String("local-member-id", s.ID().String()),
            zap.String("current-leader-member-id", types.ID(lead).String()),
            zap.String("transferee-member-id", types.ID(transferee).String()),
        )
    } else {
        plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
    }

    s.r.TransferLeadership(ctx, lead, transferee)
    for s.Lead() != transferee {
        select {
        case <-ctx.Done(): // time out
            return ErrTimeoutLeaderTransfer
        case <-time.After(interval):
        }
    }

    // TODO: drain all requests, or drop all messages to the old leader
    if lg := s.getLogger(); lg != nil {
        lg.Info(
            "leadership transfer finished",
            zap.String("local-member-id", s.ID().String()),
            zap.String("old-leader-member-id", types.ID(lead).String()),
            zap.String("new-leader-member-id", types.ID(transferee).String()),
            zap.Duration("took", time.Since(now)),
        )
    } else {
        plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
    }
    return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L903-L928
func (r *Raft) processLogs(index uint64, future *logFuture) {
    // Reject logs we've applied already
    lastApplied := r.getLastApplied()
    if index <= lastApplied {
        r.logger.Warn(fmt.Sprintf("Skipping application of old log: %d", index))
        return
    }

    // Apply all the preceding logs
    for idx := r.getLastApplied() + 1; idx <= index; idx++ {
        // Get the log, either from the future or from our log store
        if future != nil && future.log.Index == idx {
            r.processLog(&future.log, future)
        } else {
            l := new(Log)
            if err := r.logs.GetLog(idx, l); err != nil {
                r.logger.Error(fmt.Sprintf("Failed to get log at %d: %v", idx, err))
                panic(err)
            }
            r.processLog(l, nil)
        }

        // Update the lastApplied index and term
        r.setLastApplied(idx)
    }
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/table.go#L120-L189
func OpenTable(fd *os.File, mode options.FileLoadingMode, cksum []byte) (*Table, error) {
    fileInfo, err := fd.Stat()
    if err != nil {
        // It's OK to ignore fd.Close() errs in this function because we have only read
        // from the file.
        _ = fd.Close()
        return nil, y.Wrap(err)
    }

    filename := fileInfo.Name()
    id, ok := ParseFileID(filename)
    if !ok {
        _ = fd.Close()
        return nil, errors.Errorf("Invalid filename: %s", filename)
    }
    t := &Table{
        fd:          fd,
        ref:         1, // Caller is given one reference.
        id:          id,
        loadingMode: mode,
    }

    t.tableSize = int(fileInfo.Size())

    // We first load to RAM, so we can read the index and do checksum.
    if err := t.loadToRAM(); err != nil {
        return nil, err
    }
    // Enforce checksum before we read index. Otherwise, if the file was
    // truncated, we'd end up with panics in readIndex.
    if len(cksum) > 0 && !bytes.Equal(t.Checksum, cksum) {
        return nil, fmt.Errorf(
            "CHECKSUM_MISMATCH: Table checksum does not match checksum in MANIFEST."+
                " NOT including table %s. This would lead to missing data."+
                "\n sha256 %x Expected\n sha256 %x Found\n", filename, cksum, t.Checksum)
    }
    if err := t.readIndex(); err != nil {
        return nil, y.Wrap(err)
    }

    it := t.NewIterator(false)
    defer it.Close()
    it.Rewind()
    if it.Valid() {
        t.smallest = it.Key()
    }

    it2 := t.NewIterator(true)
    defer it2.Close()
    it2.Rewind()
    if it2.Valid() {
        t.biggest = it2.Key()
    }

    switch mode {
    case options.LoadToRAM:
        // No need to do anything. t.mmap is already filled.
    case options.MemoryMap:
        t.mmap, err = y.Mmap(fd, false, fileInfo.Size())
        if err != nil {
            _ = fd.Close()
            return nil, y.Wrapf(err, "Unable to map file")
        }
    case options.FileIO:
        t.mmap = nil
    default:
        panic(fmt.Sprintf("Invalid loading mode: %v", mode))
    }
    return t, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_transport.go#L194-L210
func (i *InmemTransport) Disconnect(peer ServerAddress) {
    i.Lock()
    defer i.Unlock()
    delete(i.peers, peer)

    // Disconnect any pipelines
    n := len(i.pipelines)
    for idx := 0; idx < n; idx++ {
        if i.pipelines[idx].peerAddr == peer {
            i.pipelines[idx].Close()
            i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil
            idx--
            n--
        }
    }
    i.pipelines = i.pipelines[:n]
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/tools/etcd-dump-logs/main.go#L223-L228
func printInternalRaftRequest(entry raftpb.Entry) {
    var rr etcdserverpb.InternalRaftRequest
    if err := rr.Unmarshal(entry.Data); err == nil {
        fmt.Printf("%4d\t%10d\tnorm\t%s", entry.Term, entry.Index, rr.String())
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/fileutil/read_dir.go#L44-L70
func ReadDir(d string, opts ...ReadDirOption) ([]string, error) {
    op := &ReadDirOp{}
    op.applyOpts(opts)

    dir, err := os.Open(d)
    if err != nil {
        return nil, err
    }
    defer dir.Close()

    names, err := dir.Readdirnames(-1)
    if err != nil {
        return nil, err
    }
    sort.Strings(names)

    if op.ext != "" {
        tss := make([]string, 0)
        for _, v := range names {
            if filepath.Ext(v) == op.ext {
                tss = append(tss, v)
            }
        }
        names = tss
    }
    return names, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L70-L80
func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node {
    return &node{
        Path:          nodePath,
        CreatedIndex:  createdIndex,
        ModifiedIndex: createdIndex,
        Parent:        parent,
        ExpireTime:    expireTime,
        Children:      make(map[string]*node),
        store:         store,
    }
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/file_snapshot.go#L110-L115
func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) {
    if logOutput == nil {
        logOutput = os.Stderr
    }
    return NewFileSnapshotStoreWithLogger(base, retain, log.New(logOutput, "", log.LstdFlags))
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L420-L430
func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) {
    f := func(n *intervalNode) bool {
        if n.iv.Ivl != ivl {
            return true
        }
        ret = n
        return false
    }
    ivt.root.visit(&ivl, f)
    return ret
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L424-L427
func (s *Iterator) Value() y.ValueStruct {
    valOffset, valSize := s.n.getValueOffset()
    return s.list.arena.getVal(valOffset, valSize)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/value.go#L936-L1006
func (vlog *valueLog) write(reqs []*request) error {
    vlog.filesLock.RLock()
    maxFid := atomic.LoadUint32(&vlog.maxFid)
    curlf := vlog.filesMap[maxFid]
    vlog.filesLock.RUnlock()

    var buf bytes.Buffer
    toDisk := func() error {
        if buf.Len() == 0 {
            return nil
        }
        vlog.elog.Printf("Flushing %d blocks of total size: %d", len(reqs), buf.Len())
        n, err := curlf.fd.Write(buf.Bytes())
        if err != nil {
            return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
        }
        buf.Reset()
        y.NumWrites.Add(1)
        y.NumBytesWritten.Add(int64(n))
        vlog.elog.Printf("Done")
        atomic.AddUint32(&vlog.writableLogOffset, uint32(n))

        if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) ||
            vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
            var err error
            if err = curlf.doneWriting(vlog.woffset()); err != nil {
                return err
            }

            newid := atomic.AddUint32(&vlog.maxFid, 1)
            y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid)
            newlf, err := vlog.createVlogFile(newid)
            if err != nil {
                return err
            }
            curlf = newlf
        }
        return nil
    }

    for i := range reqs {
        b := reqs[i]
        b.Ptrs = b.Ptrs[:0]
        for j := range b.Entries {
            e := b.Entries[j]
            var p valuePointer

            p.Fid = curlf.fid
            // Use the offset including buffer length so far.
            p.Offset = vlog.woffset() + uint32(buf.Len())
            plen, err := encodeEntry(e, &buf) // Now encode the entry into buffer.
            if err != nil {
                return err
            }
            p.Len = uint32(plen)
            b.Ptrs = append(b.Ptrs, p)
        }
        vlog.numEntriesWritten += uint32(len(b.Entries))
        // We write to disk here so that all entries that are part of the same transaction are
        // written to the same vlog file.
        writeNow := vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) ||
            vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
        if writeNow {
            if err := toDisk(); err != nil {
                return err
            }
        }
    }
    return toDisk()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3rpc/watch.go#L72-L83
func GetProgressReportInterval() time.Duration {
    progressReportIntervalMu.RLock()
    interval := progressReportInterval
    progressReportIntervalMu.RUnlock()

    // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not
    // send progress notifications to watchers around the same time even when watchers
    // are created around the same time (which is common when a client restarts itself).
    jitter := time.Duration(rand.Int63n(int64(interval) / 10))

    return interval + jitter
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/version/version.go#L50-L56
func Cluster(v string) string {
    vs := strings.Split(v, ".")
    if len(vs) <= 2 {
        return v
    }
    return fmt.Sprintf("%s.%s", vs[0], vs[1])
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/mock/mockserver/mockserver.go#L146-L158
func (ms *MockServers) StopAt(idx int) {
    ms.mu.Lock()
    defer ms.mu.Unlock()

    if ms.Servers[idx].ln == nil {
        return
    }
    ms.Servers[idx].GrpcServer.Stop()
    ms.Servers[idx].GrpcServer = nil
    ms.Servers[idx].ln = nil
    ms.wg.Done()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/unique_strings.go#L69-L71
func UniqueStringsFromFlag(fs *flag.FlagSet, flagName string) []string {
    return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).stringSlice()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L165-L168
func (l *raftLog) hasNextEnts() bool {
    off := max(l.applied+1, l.firstIndex())
    return l.committed+1 > off
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L350-L362
func (n *NetworkTransport) returnConn(conn *netConn) {
    n.connPoolLock.Lock()
    defer n.connPoolLock.Unlock()

    key := conn.target
    conns, _ := n.connPool[key]

    if !n.IsShutdown() && len(conns) < n.maxPool {
        n.connPool[key] = append(conns, conn)
    } else {
        conn.Release()
    }
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L89-L97
func (r *Raft) setLeader(leader ServerAddress) {
    r.leaderLock.Lock()
    oldLeader := r.leader
    r.leader = leader
    r.leaderLock.Unlock()
    if oldLeader != leader {
        r.observe(LeaderObservation{leader: leader})
    }
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/replication.go#L488-L500
func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error {
    req.RPCHeader = r.getRPCHeader()
    req.Term = s.currentTerm
    req.Leader = r.trans.EncodePeer(r.localID, r.localAddr)
    req.LeaderCommitIndex = r.getCommitIndex()
    if err := r.setPreviousLog(req, nextIndex); err != nil {
        return err
    }
    if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {
        return err
    }
    return nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/render.go#L17-L52
func New(opts Options) *Engine {
    if opts.Helpers == nil {
        opts.Helpers = map[string]interface{}{}
    }
    if opts.TemplateEngines == nil {
        opts.TemplateEngines = map[string]TemplateEngine{}
    }
    if _, ok := opts.TemplateEngines["html"]; !ok {
        opts.TemplateEngines["html"] = plush.BuffaloRenderer
    }
    if _, ok := opts.TemplateEngines["text"]; !ok {
        opts.TemplateEngines["text"] = plush.BuffaloRenderer
    }
    if _, ok := opts.TemplateEngines["txt"]; !ok {
        opts.TemplateEngines["txt"] = plush.BuffaloRenderer
    }
    if _, ok := opts.TemplateEngines["js"]; !ok {
        opts.TemplateEngines["js"] = plush.BuffaloRenderer
    }
    if _, ok := opts.TemplateEngines["md"]; !ok {
        opts.TemplateEngines["md"] = MDTemplateEngine
    }
    if _, ok := opts.TemplateEngines["tmpl"]; !ok {
        opts.TemplateEngines["tmpl"] = GoTemplateEngine
    }
    if opts.DefaultContentType == "" {
        opts.DefaultContentType = "text/html; charset=utf-8"
    }
    e := &Engine{
        Options: opts,
    }
    return e
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L429-L479
func (txn *Txn) Get(key []byte) (item *Item, rerr error) {
    if len(key) == 0 {
        return nil, ErrEmptyKey
    } else if txn.discarded {
        return nil, ErrDiscardedTxn
    }

    item = new(Item)
    if txn.update {
        if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) {
            if isDeletedOrExpired(e.meta, e.ExpiresAt) {
                return nil, ErrKeyNotFound
            }
            // Fulfill from cache.
            item.meta = e.meta
            item.val = e.Value
            item.userMeta = e.UserMeta
            item.key = key
            item.status = prefetched
            item.version = txn.readTs
            item.expiresAt = e.ExpiresAt
            // We probably don't need to set db on item here.
            return item, nil
        }
        // Only track reads if this is update txn. No need to track read if txn serviced it
        // internally.
        txn.addReadKey(key)
    }

    seek := y.KeyWithTs(key, txn.readTs)
    vs, err := txn.db.get(seek)
    if err != nil {
        return nil, errors.Wrapf(err, "DB::Get key: %q", key)
    }
    if vs.Value == nil && vs.Meta == 0 {
        return nil, ErrKeyNotFound
    }
    if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
        return nil, ErrKeyNotFound
    }

    item.key = key
    item.version = vs.Version
    item.meta = vs.Meta
    item.userMeta = vs.UserMeta
    item.db = txn.db
    item.vptr = vs.Value // TODO: Do we need to copy this over?
    item.txn = txn
    item.expiresAt = vs.ExpiresAt
    return item, nil
}
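A hedged read-modify-write sketch built on Get (the counter key and the big-endian encoding are illustrative; the badger and encoding/binary imports are assumed). Inside db.Update, Get sees the transaction's own pending writes first, as the function above shows:

func increment(db *badger.DB, key []byte) error {
    return db.Update(func(txn *badger.Txn) error {
        var n uint64
        item, err := txn.Get(key)
        if err == nil {
            err = item.Value(func(val []byte) error {
                n = binary.BigEndian.Uint64(val)
                return nil
            })
            if err != nil {
                return err
            }
        } else if err != badger.ErrKeyNotFound {
            return err // ErrKeyNotFound simply means we start from zero
        }
        var buf [8]byte
        binary.BigEndian.PutUint64(buf[:], n+1)
        return txn.Set(key, buf[:])
    })
}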
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L595-L600
func (r *Raft) Leader() ServerAddress {
    r.leaderLock.RLock()
    leader := r.leader
    r.leaderLock.RUnlock()
    return leader
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/cache/store.go#L131-L154
func (c *cache) Invalidate(key, endkey []byte) {
    c.mu.Lock()
    defer c.mu.Unlock()

    var (
        ivs []*adt.IntervalValue
        ivl adt.Interval
    )
    if len(endkey) == 0 {
        ivl = adt.NewStringAffinePoint(string(key))
    } else {
        ivl = adt.NewStringAffineInterval(string(key), string(endkey))
    }

    ivs = c.cachedRanges.Stab(ivl)
    for _, iv := range ivs {
        keys := iv.Val.(map[string]struct{})
        for key := range keys {
            c.lru.Remove(key)
        }
    }
    // delete after removing all keys since it is destructive to 'ivs'
    c.cachedRanges.Delete(ivl)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/iterator.go#L194-L199
func (s *MergeIterator) Key() []byte {
    if len(s.h) == 0 {
        return nil
    }
    return s.h[0].itr.Key()
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L419-L425
func (txn *Txn) Delete(key []byte) error {
    e := &Entry{
        Key:  key,
        meta: bitDelete,
    }
    return txn.modify(e)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/lease_command.go#L145-L151
func leaseListCommandFunc(cmd *cobra.Command, args []string) {
    resp, rerr := mustClientFromCmd(cmd).Leases(context.TODO())
    if rerr != nil {
        ExitWithError(ExitBadConnection, rerr)
    }
    display.Leases(*resp)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/metrics.go#L54-L68
func init() {
    NumReads = expvar.NewInt("badger_disk_reads_total")
    NumWrites = expvar.NewInt("badger_disk_writes_total")
    NumBytesRead = expvar.NewInt("badger_read_bytes")
    NumBytesWritten = expvar.NewInt("badger_written_bytes")
    NumLSMGets = expvar.NewMap("badger_lsm_level_gets_total")
    NumLSMBloomHits = expvar.NewMap("badger_lsm_bloom_hits_total")
    NumGets = expvar.NewInt("badger_gets_total")
    NumPuts = expvar.NewInt("badger_puts_total")
    NumBlockedPuts = expvar.NewInt("badger_blocked_puts_total")
    NumMemtableGets = expvar.NewInt("badger_memtable_gets_total")
    LSMSize = expvar.NewMap("badger_lsm_size_bytes")
    VlogSize = expvar.NewMap("badger_vlog_size_bytes")
    PendingWrites = expvar.NewMap("badger_pending_writes_total")
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/util.go#L41-L57
func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) {
    for i := len(names) - 1; i >= 0; i-- {
        name := names[i]
        _, curIndex, err := parseWALName(name)
        if err != nil {
            if lg != nil {
                lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
            } else {
                plog.Panicf("parse correct name should never fail: %v", err)
            }
        }
        if index >= curIndex {
            return i, true
        }
    }
    return -1, false
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L120-L129
func (n *node) Write(value string, index uint64) *v2error.Error {
    if n.IsDir() {
        return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
    }

    n.Value = value
    n.ModifiedIndex = index

    return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/file_snapshot.go#L208-L224
func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) {
    // Get the eligible snapshots
    snapshots, err := f.getSnapshots()
    if err != nil {
        f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err)
        return nil, err
    }

    var snapMeta []*SnapshotMeta
    for _, meta := range snapshots {
        snapMeta = append(snapMeta, &meta.SnapshotMeta)
        if len(snapMeta) == f.retain {
            break
        }
    }
    return snapMeta, nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/session.go#L55-L65
func (a *App) getSession(r *http.Request, w http.ResponseWriter) *Session {
    if a.root != nil {
        return a.root.getSession(r, w)
    }
    session, _ := a.SessionStore.Get(r, a.SessionName)
    return &Session{
        Session: session,
        req:     r,
        res:     w,
    }
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/template.go#L188-L194
func (e *Engine) Template(c string, names ...string) Renderer {
    return &templateRenderer{
        Engine:      e,
        contentType: c,
        names:       names,
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/namespace/watch.go#L36-L38
func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
    return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/resolver/endpoint/endpoint.go#L93-L95
func (e *ResolverGroup) Target(endpoint string) string {
    return Target(e.id, endpoint)
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/fuzzy/transport.go#L240-L250
func (t *transport) AppendEntriesPipeline(id raft.ServerID, target raft.ServerAddress) (raft.AppendPipeline, error) {
    p := &pipeline{
        t:        t,
        id:       id,
        target:   target,
        work:     make(chan *appendEntry, 100),
        consumer: make(chan raft.AppendFuture, 100),
    }
    go p.run()
    return p, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_snapshot.go#L89-L93
func (s *InmemSnapshotSink) Write(p []byte) (n int, err error) {
    written, err := io.Copy(s.contents, bytes.NewReader(p))
    s.meta.Size += written
    return int(written), err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L280-L287
func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
    creds := c.directDialCreds(ep)
    // Use the grpc passthrough resolver to directly dial a single endpoint.
    // This resolver passes through the 'unix' and 'unixs' endpoints schemes used
    // by etcd without modification, allowing us to directly dial endpoints and
    // using the same dial functions that we use for load balancer dialing.
    return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L132-L136
func (o *oracle) setDiscardTs(ts uint64) {
    o.Lock()
    defer o.Unlock()
    o.discardTs = ts
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/util.go#L31-L37
func randomTimeout(minVal time.Duration) <-chan time.Time {
    if minVal == 0 {
        return nil
    }
    extra := (time.Duration(rand.Int63()) % minVal)
    return time.After(minVal + extra)
}
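A hedged sketch of the follower-side pattern this helper enables (the function and channel names are illustrative, not from the library): re-arming a randomized timer each loop iteration spreads timeouts across [minVal, 2*minVal), so peers rarely become candidates at the same instant:

func waitForElection(timeout time.Duration, heartbeatCh, shutdownCh <-chan struct{}) bool {
    select {
    case <-randomTimeout(timeout):
        return true // no leader contact within the randomized window: campaign
    case <-heartbeatCh:
        return false // leader is alive; caller re-arms on the next iteration
    case <-shutdownCh:
        return false
    }
}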
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L426-L467
func (r *Raft) startStopReplication() {
    inConfig := make(map[ServerID]bool, len(r.configurations.latest.Servers))
    lastIdx := r.getLastIndex()

    // Start replication goroutines that need starting
    for _, server := range r.configurations.latest.Servers {
        if server.ID == r.localID {
            continue
        }
        inConfig[server.ID] = true
        if _, ok := r.leaderState.replState[server.ID]; !ok {
            r.logger.Info(fmt.Sprintf("Added peer %v, starting replication", server.ID))
            s := &followerReplication{
                peer:        server,
                commitment:  r.leaderState.commitment,
                stopCh:      make(chan uint64, 1),
                triggerCh:   make(chan struct{}, 1),
                currentTerm: r.getCurrentTerm(),
                nextIndex:   lastIdx + 1,
                lastContact: time.Now(),
                notify:      make(map[*verifyFuture]struct{}),
                notifyCh:    make(chan struct{}, 1),
                stepDown:    r.leaderState.stepDown,
            }
            r.leaderState.replState[server.ID] = s
            r.goFunc(func() { r.replicate(s) })
            asyncNotifyCh(s.triggerCh)
        }
    }

    // Stop replication goroutines that need stopping
    for serverID, repl := range r.leaderState.replState {
        if inConfig[serverID] {
            continue
        }
        // Replicate up to lastIdx and stop
        r.logger.Info(fmt.Sprintf("Removed peer %v, stopping replication after %v", serverID, lastIdx))
        repl.stopCh <- lastIdx
        close(repl.stopCh)
        delete(r.leaderState.replState, serverID)
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/cache/store.go#L74-L109
func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) {
    key := keyFunc(req)

    c.mu.Lock()
    defer c.mu.Unlock()

    if req.Revision > c.compactedRev {
        c.lru.Add(key, resp)
    }
    // we do not need to invalidate a request with a revision specified.
    // so we do not need to add it into the reverse index.
    if req.Revision != 0 {
        return
    }

    var (
        iv  *adt.IntervalValue
        ivl adt.Interval
    )
    if len(req.RangeEnd) != 0 {
        ivl = adt.NewStringAffineInterval(string(req.Key), string(req.RangeEnd))
    } else {
        ivl = adt.NewStringAffinePoint(string(req.Key))
    }

    iv = c.cachedRanges.Find(ivl)

    if iv == nil {
        val := map[string]struct{}{key: {}}
        c.cachedRanges.Insert(ivl, val)
    } else {
        val := iv.Val.(map[string]struct{})
        val[key] = struct{}{}
        iv.Val = val
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L654-L670
func IsConnCanceled(err error) bool {
    if err == nil {
        return false
    }

    // >= gRPC v1.10.x
    s, ok := status.FromError(err)
    if ok {
        // connection is canceled or server has already closed the connection
        return s.Code() == codes.Canceled || s.Message() == "transport is closing"
    }

    // >= gRPC v1.10.x
    if err == context.Canceled {
        return true
    }

    // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
    return strings.Contains(err.Error(), "grpc: the client connection is closing")
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/election.go#L44-L46
func NewElection(s *Session, pfx string) *Election {
    return &Election{session: s, keyPrefix: pfx + "/"}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/stm.go#L89-L102
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
    opts := &stmOptions{ctx: c.Ctx()}
    for _, f := range so {
        f(opts)
    }
    if len(opts.prefetch) != 0 {
        f := apply
        apply = func(s STM) error {
            s.Get(opts.prefetch...)
            return f(s)
        }
    }
    return runSTM(mkSTM(c, opts), apply)
}
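A hedged usage sketch modeled on the package's documented account-transfer example (the account keys and strconv-based encoding are illustrative; clientv3, concurrency, and strconv imports are assumed). Because apply may run several times if the read set changes underneath it, it should be a pure function of s:

func transfer(cli *clientv3.Client, from, to string, amount int) error {
    apply := func(s concurrency.STM) error {
        src, _ := strconv.Atoi(s.Get(from))
        dst, _ := strconv.Atoi(s.Get(to))
        s.Put(from, strconv.Itoa(src-amount))
        s.Put(to, strconv.Itoa(dst+amount))
        return nil
    }
    _, err := concurrency.NewSTM(cli, apply)
    return err
}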
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/response.go#L53-L58
func (w *Response) CloseNotify() <-chan bool {
    if cn, ok := w.ResponseWriter.(closeNotifier); ok {
        return cn.CloseNotify()
    }
    return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/stream.go#L128-L202
func (st *Stream) produceKVs(ctx context.Context) error {
    var size int
    var txn *Txn
    if st.readTs > 0 {
        txn = st.db.NewTransactionAt(st.readTs, false)
    } else {
        txn = st.db.NewTransaction(false)
    }
    defer txn.Discard()

    iterate := func(kr keyRange) error {
        iterOpts := DefaultIteratorOptions
        iterOpts.AllVersions = true
        iterOpts.Prefix = st.Prefix
        iterOpts.PrefetchValues = false
        itr := txn.NewIterator(iterOpts)
        defer itr.Close()

        outList := new(pb.KVList)
        var prevKey []byte
        for itr.Seek(kr.left); itr.Valid(); {
            // it.Valid would only return true for keys with the provided Prefix in iterOpts.
            item := itr.Item()
            if bytes.Equal(item.Key(), prevKey) {
                itr.Next()
                continue
            }
            prevKey = append(prevKey[:0], item.Key()...)

            // Check if we reached the end of the key range.
            if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 {
                break
            }
            // Check if we should pick this key.
            if st.ChooseKey != nil && !st.ChooseKey(item) {
                continue
            }

            // Now convert to key value.
            list, err := st.KeyToList(item.KeyCopy(nil), itr)
            if err != nil {
                return err
            }
            if list == nil || len(list.Kv) == 0 {
                continue
            }
            outList.Kv = append(outList.Kv, list.Kv...)
            size += list.Size()
            if size >= pageSize {
                st.kvChan <- outList
                outList = new(pb.KVList)
                size = 0
            }
        }
        if len(outList.Kv) > 0 {
            st.kvChan <- outList
        }
        return nil
    }

    for {
        select {
        case kr, ok := <-st.rangeCh:
            if !ok {
                // Done with the keys.
                return nil
            }
            if err := iterate(kr); err != nil {
                return err
            }
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/watcher_group.go#L164-L182
func (wg *watcherGroup) add(wa *watcher) {
    wg.watchers.add(wa)
    if wa.end == nil {
        wg.keyWatchers.add(wa)
        return
    }

    // interval already registered?
    ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
    if iv := wg.ranges.Find(ivl); iv != nil {
        iv.Val.(watcherSet).add(wa)
        return
    }

    // not registered, put in interval tree
    ws := make(watcherSet)
    ws.add(wa)
    wg.ranges.Insert(ivl, ws)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/stream.go#L738-L746
func checkStreamSupport(v *semver.Version, t streamType) bool {
    nv := &semver.Version{Major: v.Major, Minor: v.Minor}
    for _, s := range supportedStream[nv.String()] {
        if s == t {
            return true
        }
    }
    return false
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/listener.go#L412-L417
func IsClosedConnError(err error) bool {
    // 'use of closed network connection' (Go <=1.8)
    // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
    // 'mux: listener closed' (cmux.ErrListenerClosed)
    return err != nil && strings.Contains(err.Error(), "closed")
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/arena.go#L117-L119
func (s *Arena) getKey(offset uint32, size uint16) []byte {
    return s.buf[offset : offset+uint32(size)]
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/serve.go#L302-L304
func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
    return &accessController{lg: lg, s: s, mux: mux}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/route_mappings.go#L217-L231
func (a *App) Group(groupPath string) *App {
    g := New(a.Options)
    g.Prefix = path.Join(a.Prefix, groupPath)
    g.Name = g.Prefix

    g.router = a.router
    g.Middleware = a.Middleware.clone()
    g.ErrorHandlers = a.ErrorHandlers
    g.root = a
    if a.root != nil {
        g.root = a.root
    }
    a.children = append(a.children, g)
    return g
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/auth/options.go#L54-L60
func (opts *jwtOptions) ParseWithDefaults(optMap map[string]string) error {
    if opts.TTL == 0 && optMap[optTTL] == "" {
        opts.TTL = DefaultTTL
    }

    return opts.Parse(optMap)
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/sse.go#L54-L59
func (es *EventSource) CloseNotify() <-chan bool {
    if cn, ok := es.w.(closeNotifier); ok {
        return cn.CloseNotify()
    }
    return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/config.go#L205-L219
func DefaultConfig() *Config {
    return &Config{
        ProtocolVersion:    ProtocolVersionMax,
        HeartbeatTimeout:   1000 * time.Millisecond,
        ElectionTimeout:    1000 * time.Millisecond,
        CommitTimeout:      50 * time.Millisecond,
        MaxAppendEntries:   64,
        ShutdownOnRemove:   true,
        TrailingLogs:       10240,
        SnapshotInterval:   120 * time.Second,
        SnapshotThreshold:  8192,
        LeaderLeaseTimeout: 500 * time.Millisecond,
        LogLevel:           "DEBUG",
    }
}
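A hedged wiring sketch (the node ID is hypothetical; fsm is any caller-supplied raft.FSM): DefaultConfig is typically adjusted, at minimum LocalID, before being handed to raft.NewRaft together with storage and transport implementations, here the in-memory ones from the same package:

func newTestNode(fsm raft.FSM) (*raft.Raft, error) {
    conf := raft.DefaultConfig()
    conf.LocalID = raft.ServerID("node-1") // hypothetical ID

    store := raft.NewInmemStore() // serves as both LogStore and StableStore
    snaps := raft.NewInmemSnapshotStore()
    _, trans := raft.NewInmemTransport("")

    return raft.NewRaft(conf, fsm, store, store, snaps, trans)
}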
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L117-L119
func NewFromURL(url string) (*Client, error) {
    return New(Config{Endpoints: []string{url}})
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/urlsmap.go#L45-L55
func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
    var err error
    um := URLsMap{}
    for k, v := range m {
        um[k], err = NewURLs(strings.Split(v, sep))
        if err != nil {
            return nil, err
        }
    }
    return um, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L1477-L1484
func (r *Raft) setState(state RaftState) {
    r.setLeader("")
    oldState := r.raftState.getState()
    r.raftState.setState(state)
    if oldState != state {
        r.observe(state)
    }
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L919-L953
func (db *DB) calculateSize() {
    newInt := func(val int64) *expvar.Int {
        v := new(expvar.Int)
        v.Add(val)
        return v
    }

    totalSize := func(dir string) (int64, int64) {
        var lsmSize, vlogSize int64
        err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            ext := filepath.Ext(path)
            if ext == ".sst" {
                lsmSize += info.Size()
            } else if ext == ".vlog" {
                vlogSize += info.Size()
            }
            return nil
        })
        if err != nil {
            db.elog.Printf("Got error while calculating total size of directory: %s", dir)
        }
        return lsmSize, vlogSize
    }

    lsmSize, vlogSize := totalSize(db.opt.Dir)
    y.LSMSize.Set(db.opt.Dir, newInt(lsmSize))
    // If valueDir is different from dir, we'd have to do another walk.
    if db.opt.ValueDir != db.opt.Dir {
        _, vlogSize = totalSize(db.opt.ValueDir)
    }
    y.VlogSize.Set(db.opt.Dir, newInt(vlogSize))
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/route_mappings.go#L24-L26
func (a *App) GET(p string, h Handler) *RouteInfo {
    return a.addRoute("GET", p, h)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/backup_command.go#L58-L103
func handleBackup(c *cli.Context) error {
    var srcWAL string
    var destWAL string

    withV3 := c.Bool("with-v3")
    srcSnap := filepath.Join(c.String("data-dir"), "member", "snap")
    destSnap := filepath.Join(c.String("backup-dir"), "member", "snap")

    if c.String("wal-dir") != "" {
        srcWAL = c.String("wal-dir")
    } else {
        srcWAL = filepath.Join(c.String("data-dir"), "member", "wal")
    }

    if c.String("backup-wal-dir") != "" {
        destWAL = c.String("backup-wal-dir")
    } else {
        destWAL = filepath.Join(c.String("backup-dir"), "member", "wal")
    }

    if err := fileutil.CreateDirAll(destSnap); err != nil {
        log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
    }

    walsnap := saveSnap(destSnap, srcSnap)
    metadata, state, ents := loadWAL(srcWAL, walsnap, withV3)
    saveDB(filepath.Join(destSnap, "db"), filepath.Join(srcSnap, "db"), state.Commit, withV3)

    idgen := idutil.NewGenerator(0, time.Now())
    metadata.NodeID = idgen.Next()
    metadata.ClusterID = idgen.Next()

    neww, err := wal.Create(zap.NewExample(), destWAL, pbutil.MustMarshal(&metadata))
    if err != nil {
        log.Fatal(err)
    }
    defer neww.Close()
    if err := neww.Save(state, ents); err != nil {
        log.Fatal(err)
    }
    if err := neww.SaveSnapshot(walsnap); err != nil {
        log.Fatal(err)
    }

    return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L171-L181
func setPeerURLsHeader(req *http.Request, urls types.URLs) {
    if urls == nil {
        // often not set in unit tests
        return
    }
    peerURLs := make([]string, urls.Len())
    for i := range urls {
        peerURLs[i] = urls[i].String()
    }
    req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ","))
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/read_only.go#L78-L109
func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
    var (
        i     int
        found bool
    )

    ctx := string(m.Context)
    rss := []*readIndexStatus{}

    for _, okctx := range ro.readIndexQueue {
        i++
        rs, ok := ro.pendingReadIndex[okctx]
        if !ok {
            panic("cannot find corresponding read state from pending map")
        }
        rss = append(rss, rs)
        if okctx == ctx {
            found = true
            break
        }
    }

    if found {
        ro.readIndexQueue = ro.readIndexQueue[i:]
        for _, rs := range rss {
            delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
        }
        return rss
    }

    return nil
}