Dataset columns: id (string, 95–167 chars), text (string, 69–15.9k chars), title (string, 1 distinct value)
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/op.go#L461-L463
func withTop(target SortTarget, order SortOrder) []OpOption {
	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry_interceptor.go#L37-L92
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) // short circuit for simplicity, and avoiding allocations. if callOpts.max == 0 { return invoker(ctx, method, req, reply, cc, grpcOpts...) } var lastErr error for attempt := uint(0); attempt < callOpts.max; attempt++ { if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil { return err } logger.Debug( "retrying of unary invoker", zap.String("target", cc.Target()), zap.Uint("attempt", attempt), ) lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) if lastErr == nil { return nil } logger.Warn( "retrying of unary invoker failed", zap.String("target", cc.Target()), zap.Uint("attempt", attempt), zap.Error(lastErr), ) if isContextError(lastErr) { if ctx.Err() != nil { // its the context deadline or cancellation. return lastErr } // its the callCtx deadline or cancellation, in which case try again. continue } if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(ctx) if gterr != nil { logger.Warn( "retrying of unary invoker failed to fetch new auth token", zap.String("target", cc.Target()), zap.Error(gterr), ) return lastErr // return the original error for simplicity } continue } if !isSafeRetry(c.lg, lastErr, callOpts) { return lastErr } } return lastErr } }
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/watermark.go#L71-L75
func (w *WaterMark) Init(closer *Closer) {
	w.markCh = make(chan mark, 100)
	w.elog = trace.NewEventLog("Watermark", w.Name)
	go w.process(closer)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pjutil/pjutil.go#L98-L106
func NewPresubmit(pr github.PullRequest, baseSHA string, job config.Presubmit, eventGUID string) prowapi.ProwJob {
	refs := createRefs(pr, baseSHA)
	labels := make(map[string]string)
	for k, v := range job.Labels {
		labels[k] = v
	}
	labels[github.EventGUID] = eventGUID
	return NewProwJob(PresubmitSpec(job, refs), labels)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L58-L60
func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
	return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/etcd.go#L527-L585
func (e *Etcd) servePeers() (err error) { ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server) var peerTLScfg *tls.Config if !e.cfg.PeerTLSInfo.Empty() { if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil { return err } } for _, p := range e.Peers { u := p.Listener.Addr().String() gs := v3rpc.Server(e.Server, peerTLScfg) m := cmux.New(p.Listener) go gs.Serve(m.Match(cmux.HTTP2())) srv := &http.Server{ Handler: grpcHandlerFunc(gs, ph), ReadTimeout: 5 * time.Minute, ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error } go srv.Serve(m.Match(cmux.Any())) p.serve = func() error { return m.Serve() } p.close = func(ctx context.Context) error { // gracefully shutdown http.Server // close open listeners, idle connections // until context cancel or time-out if e.cfg.logger != nil { e.cfg.logger.Info( "stopping serving peer traffic", zap.String("address", u), ) } stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv}) if e.cfg.logger != nil { e.cfg.logger.Info( "stopped serving peer traffic", zap.String("address", u), ) } return nil } } // start peer servers in a goroutine for _, pl := range e.Peers { go func(l *peerListener) { u := l.Addr().String() if e.cfg.logger != nil { e.cfg.logger.Info( "serving peer traffic", zap.String("address", u), ) } else { plog.Info("listening for peers on ", u) } e.errHandler(l.serve()) }(pl) } return nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/mock/mockserver/mockserver.go#L64-L73
func StartMockServersOnNetwork(count int, network string) (ms *MockServers, err error) {
	switch network {
	case "tcp":
		return startMockServersTcp(count)
	case "unix":
		return startMockServersUnix(count)
	default:
		return nil, fmt.Errorf("unsupported network type: %s", network)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L117-L133
func (x *intervalNode) updateMax() {
	for x != nil {
		oldmax := x.max
		max := x.iv.Ivl.End
		if x.left != nil && x.left.max.Compare(max) > 0 {
			max = x.left.max
		}
		if x.right != nil && x.right.max.Compare(max) > 0 {
			max = x.right.max
		}
		if oldmax.Compare(max) == 0 {
			break
		}
		x.max = max
		x = x.parent
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/election.go#L167-L171
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
	retc := make(chan v3.GetResponse)
	go e.observe(ctx, retc)
	return retc
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/mail/internal/mail/message.go#L193-L195
func (m *Message) SetBody(contentType, body string, settings ...PartSetting) {
	m.SetBodyWriter(contentType, newCopier(body), settings...)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L442-L444
func (s *Iterator) Seek(target []byte) {
	s.n, _ = s.list.findNear(target, false, true) // find >=.
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/route_info.go#L73-L92
func (ri *RouteInfo) BuildPathHelper() RouteHelperFunc {
	cRoute := ri
	return func(opts map[string]interface{}) (template.HTML, error) {
		pairs := []string{}
		for k, v := range opts {
			pairs = append(pairs, k)
			pairs = append(pairs, fmt.Sprintf("%v", v))
		}

		url, err := cRoute.MuxRoute.URL(pairs...)
		if err != nil {
			return "", errors.Wrapf(err, "missing parameters for %v", cRoute.Path)
		}

		result := url.Path
		result = addExtraParamsTo(result, opts)
		return template.HTML(result), nil
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L218-L220
func (f *FakeClient) GetRef(owner, repo, ref string) (string, error) {
	return TestRef, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/config_logging.go#L277-L291
func NewZapCoreLoggerBuilder(lg *zap.Logger, cr zapcore.Core, syncer zapcore.WriteSyncer) func(*Config) error {
	return func(cfg *Config) error {
		cfg.loggerMu.Lock()
		defer cfg.loggerMu.Unlock()
		cfg.logger = lg
		cfg.loggerConfig = nil
		cfg.loggerCore = cr
		cfg.loggerWriteSyncer = syncer

		grpcLogOnce.Do(func() {
			grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer))
		})
		return nil
	}
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/configuration.go#L137-L143
func (c *configurations) Clone() (copy configurations) {
	copy.committed = c.committed.Clone()
	copy.committedIndex = c.committedIndex
	copy.latest = c.latest.Clone()
	copy.latestIndex = c.latestIndex
	return
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L223-L226
func (f *FakeClient) DeleteRef(owner, repo, ref string) error {
	f.RefsDeleted = append(f.RefsDeleted, struct{ Org, Repo, Ref string }{Org: owner, Repo: repo, Ref: ref})
	return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L1192-L1256
func (db *DB) Flatten(workers int) error { db.stopCompactions() defer db.startCompactions() compactAway := func(cp compactionPriority) error { db.opt.Infof("Attempting to compact with %+v\n", cp) errCh := make(chan error, 1) for i := 0; i < workers; i++ { go func() { errCh <- db.lc.doCompact(cp) }() } var success int var rerr error for i := 0; i < workers; i++ { err := <-errCh if err != nil { rerr = err db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err) } else { success++ } } if success == 0 { return rerr } // We could do at least one successful compaction. So, we'll consider this a success. db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n", success, cp.level) return nil } hbytes := func(sz int64) string { return humanize.Bytes(uint64(sz)) } for { db.opt.Infof("\n") var levels []int for i, l := range db.lc.levels { sz := l.getTotalSize() db.opt.Infof("Level: %d. %8s Size. %8s Max.\n", i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize)) if sz > 0 { levels = append(levels, i) } } if len(levels) <= 1 { prios := db.lc.pickCompactLevels() if len(prios) == 0 || prios[0].score <= 1.0 { db.opt.Infof("All tables consolidated into one level. Flattening done.\n") return nil } if err := compactAway(prios[0]); err != nil { return err } continue } // Create an artificial compaction priority, to ensure that we compact the level. cp := compactionPriority{level: levels[0], score: 1.71} if err := compactAway(cp); err != nil { return err } } }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/httputil/httputil.go#L41-L50
func GetHostname(req *http.Request) string {
	if req == nil {
		return ""
	}
	h, _, err := net.SplitHostPort(req.Host)
	if err != nil {
		return req.Host
	}
	return h
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/file_snapshot.go#L380-L430
func (s *FileSnapshotSink) Close() error { // Make sure close is idempotent if s.closed { return nil } s.closed = true // Close the open handles if err := s.finalize(); err != nil { s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) if delErr := os.RemoveAll(s.dir); delErr != nil { s.logger.Printf("[ERR] snapshot: Failed to delete temporary snapshot directory at path %v: %v", s.dir, delErr) return delErr } return err } // Write out the meta data if err := s.writeMeta(); err != nil { s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) return err } // Move the directory into place newPath := strings.TrimSuffix(s.dir, tmpSuffix) if err := os.Rename(s.dir, newPath); err != nil { s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err) return err } if runtime.GOOS != "windows" { //skipping fsync for directory entry edits on Windows, only needed for *nix style file systems parentFH, err := os.Open(s.parentDir) defer parentFH.Close() if err != nil { s.logger.Printf("[ERR] snapshot: Failed to open snapshot parent directory %v, error: %v", s.parentDir, err) return err } if err = parentFH.Sync(); err != nil { s.logger.Printf("[ERR] snapshot: Failed syncing parent directory %v, error: %v", s.parentDir, err) return err } } // Reap any old snapshots if err := s.store.ReapSnapshots(); err != nil { return err } return nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2v3/store.go#L607-L622
func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern {
	if kv == nil {
		return nil
	}
	n := &v2store.NodeExtern{
		Key:           s.mkNodePath(string(kv.Key)),
		Dir:           kv.Key[len(kv.Key)-1] == '/',
		CreatedIndex:  mkV2Rev(kv.CreateRevision),
		ModifiedIndex: mkV2Rev(kv.ModRevision),
	}
	if !n.Dir {
		v := string(kv.Value)
		n.Value = &v
	}
	return n
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3client/v3client.go#L30-L51
func New(s *etcdserver.EtcdServer) *clientv3.Client {
	c := clientv3.NewCtxClient(context.Background())

	kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
	c.KV = clientv3.NewKVFromKVClient(kvc, c)

	lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
	c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second)

	wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
	c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)}

	mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
	c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c)

	clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
	c.Cluster = clientv3.NewClusterFromClusterClient(clc, c)

	// TODO: implement clientv3.Auth interface?

	return c
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L1050-L1056
func checkLeaderTransition(m *member, oldLead uint64) uint64 {
	interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond
	for m.s.Lead() == 0 || (m.s.Lead() == oldLead) {
		time.Sleep(interval)
	}
	return m.s.Lead()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/key.go#L149-L151
func newUniqueEphemeralKey(s *concurrency.Session, prefix string) (*EphemeralKV, error) {
	return newUniqueEphemeralKV(s, prefix, "")
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/format.go#L26-L60
func printResponseKey(resp *client.Response, format string) { // Format the result. switch format { case "simple": if resp.Action != "delete" { fmt.Println(resp.Node.Value) } else { fmt.Println("PrevNode.Value:", resp.PrevNode.Value) } case "extended": // Extended prints in a rfc2822 style format fmt.Println("Key:", resp.Node.Key) fmt.Println("Created-Index:", resp.Node.CreatedIndex) fmt.Println("Modified-Index:", resp.Node.ModifiedIndex) if resp.PrevNode != nil { fmt.Println("PrevNode.Value:", resp.PrevNode.Value) } fmt.Println("TTL:", resp.Node.TTL) fmt.Println("Index:", resp.Index) if resp.Action != "delete" { fmt.Println("") fmt.Println(resp.Node.Value) } case "json": b, err := json.Marshal(resp) if err != nil { panic(err) } fmt.Println(string(b)) default: fmt.Fprintln(os.Stderr, "Unsupported output format:", format) } }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L1424-L1461
func (s *EtcdServer) TransferLeadership() error { if !s.isLeader() { if lg := s.getLogger(); lg != nil { lg.Info( "skipped leadership transfer; local server is not leader", zap.String("local-member-id", s.ID().String()), zap.String("current-leader-member-id", types.ID(s.Lead()).String()), ) } else { plog.Printf("skipped leadership transfer for stopping non-leader member") } return nil } if !s.isMultiNode() { if lg := s.getLogger(); lg != nil { lg.Info( "skipped leadership transfer; it's a single-node cluster", zap.String("local-member-id", s.ID().String()), zap.String("current-leader-member-id", types.ID(s.Lead()).String()), ) } else { plog.Printf("skipped leadership transfer for single member cluster") } return nil } transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs()) if !ok { return ErrUnhealthy } tm := s.Cfg.ReqTimeout() ctx, cancel := context.WithTimeout(s.ctx, tm) err := s.MoveLeader(ctx, s.Lead(), uint64(transferee)) cancel() return err }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/binding/binding.go#L59-L62
func RegisterCustomDecoder(fn CustomTypeDecoder, types []interface{}, fields []interface{}) {
	rawFunc := (func([]string) (interface{}, error))(fn)
	decoder.RegisterCustomType(rawFunc, types, fields)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L245-L250
func NewThrottle(max int) *Throttle {
	return &Throttle{
		ch:    make(chan struct{}, max),
		errCh: make(chan error, max),
	}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/route.go#L15-L20
func (a *App) Routes() RouteList {
	if a.root != nil {
		return a.root.routes
	}
	return a.routes
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L109-L114
func KeyWithTs(key []byte, ts uint64) []byte {
	out := make([]byte, len(key)+8)
	copy(out, key)
	binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
	return out
}
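A minimal standalone sketch (not part of the badger sources; keyWithTs below is a local re-implementation for illustration) of why the math.MaxUint64-ts suffix is used: it makes keys with higher timestamps compare as byte-wise smaller, so the newest version of a key is seen first in an ascending scan.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
)

// keyWithTs mirrors the helper above: key bytes followed by the inverted timestamp.
func keyWithTs(key []byte, ts uint64) []byte {
	out := make([]byte, len(key)+8)
	copy(out, key)
	binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
	return out
}

func main() {
	newer := keyWithTs([]byte("a"), 10)
	older := keyWithTs([]byte("a"), 5)
	// The newer version compares as smaller, so it sorts before the older one.
	fmt.Println(bytes.Compare(newer, older) < 0) // true
}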
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L162-L167
func (s *Slice) Resize(sz int) []byte {
	if cap(s.buf) < sz {
		s.buf = make([]byte, sz)
	}
	return s.buf[0:sz]
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/urlsmap.go#L29-L41
func NewURLsMap(s string) (URLsMap, error) {
	m := parse(s)

	cl := URLsMap{}
	for name, urls := range m {
		us, err := NewURLs(urls)
		if err != nil {
			return nil, err
		}
		cl[name] = us
	}
	return cl, nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L289-L305
func (f *FakeClient) AddLabel(owner, repo string, number int, label string) error {
	labelString := fmt.Sprintf("%s/%s#%d:%s", owner, repo, number, label)
	if sets.NewString(f.IssueLabelsAdded...).Has(labelString) {
		return fmt.Errorf("cannot add %v to %s/%s/#%d", label, owner, repo, number)
	}
	if f.RepoLabelsExisting == nil {
		f.IssueLabelsAdded = append(f.IssueLabelsAdded, labelString)
		return nil
	}
	for _, l := range f.RepoLabelsExisting {
		if label == l {
			f.IssueLabelsAdded = append(f.IssueLabelsAdded, labelString)
			return nil
		}
	}
	return fmt.Errorf("cannot add %v to %s/%s/#%d", label, owner, repo, number)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/auth/options.go#L97-L108
func (opts *jwtOptions) Key() (interface{}, error) {
	switch opts.SignMethod.(type) {
	case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
		return opts.rsaKey()
	case *jwt.SigningMethodECDSA:
		return opts.ecKey()
	case *jwt.SigningMethodHMAC:
		return opts.hmacKey()
	default:
		return nil, fmt.Errorf("unsupported signing method: %T", opts.SignMethod)
	}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/func.go#L29-L34
func Func(s string, fn RendererFunc) Renderer {
	return funcRenderer{
		contentType: s,
		renderFunc:  fn,
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/member_command.go#L70-L81
func NewMemberUpdateCommand() *cobra.Command {
	cc := &cobra.Command{
		Use:   "update <memberID> [options]",
		Short: "Updates a member in the cluster",
		Run:   memberUpdateCommandFunc,
	}

	cc.Flags().StringVar(&memberPeerURLs, "peer-urls", "", "comma separated peer URLs for the updated member.")

	return cc
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/route_info.go#L49-L70
func (ri *RouteInfo) Name(name string) *RouteInfo {
	routeIndex := -1
	for index, route := range ri.App.Routes() {
		if route.Path == ri.Path && route.Method == ri.Method {
			routeIndex = index
			break
		}
	}

	name = flect.Camelize(name)
	if !strings.HasSuffix(name, "Path") {
		name = name + "Path"
	}

	ri.PathName = name
	if routeIndex != -1 {
		ri.App.Routes()[routeIndex] = reflect.ValueOf(ri).Interface().(*RouteInfo)
	}

	return ri
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L817-L852
func (r *Raft) appendConfigurationEntry(future *configurationChangeFuture) { configuration, err := nextConfiguration(r.configurations.latest, r.configurations.latestIndex, future.req) if err != nil { future.respond(err) return } r.logger.Info(fmt.Sprintf("Updating configuration with %s (%v, %v) to %+v", future.req.command, future.req.serverID, future.req.serverAddress, configuration.Servers)) // In pre-ID compatibility mode we translate all configuration changes // in to an old remove peer message, which can handle all supported // cases for peer changes in the pre-ID world (adding and removing // voters). Both add peer and remove peer log entries are handled // similarly on old Raft servers, but remove peer does extra checks to // see if a leader needs to step down. Since they both assert the full // configuration, then we can safely call remove peer for everything. if r.protocolVersion < 2 { future.log = Log{ Type: LogRemovePeerDeprecated, Data: encodePeers(configuration, r.trans), } } else { future.log = Log{ Type: LogConfiguration, Data: encodeConfiguration(configuration), } } r.dispatchLogs([]*logFuture{&future.logFuture}) index := future.Index() r.configurations.latest = configuration r.configurations.latestIndex = index r.leaderState.commitment.setConfiguration(configuration) r.startStopReplication() }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/request_logger.go#L20-L56
func RequestLoggerFunc(h Handler) Handler { return func(c Context) error { var irid interface{} if irid = c.Session().Get("requestor_id"); irid == nil { irid = randx.String(10) c.Session().Set("requestor_id", irid) c.Session().Save() } rid := irid.(string) + "-" + randx.String(10) c.Set("request_id", rid) c.LogField("request_id", rid) start := time.Now() defer func() { ws, ok := c.Response().(*Response) if !ok { ws = &Response{ResponseWriter: c.Response()} ws.Status = 200 } req := c.Request() ct := httpx.ContentType(req) if ct != "" { c.LogField("content_type", ct) } c.LogFields(map[string]interface{}{ "method": req.Method, "path": req.URL.String(), "duration": time.Since(start), "size": ws.Size, "human_size": humanize.Bytes(uint64(ws.Size)), "status": ws.Status, }) c.Logger().Info(req.URL.String()) }() return h(c) } }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/get_command.go#L38-L55
func NewGetCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "get [options] <key> [range_end]",
		Short: "Gets the key or a range of keys",
		Run:   getCommandFunc,
	}

	cmd.Flags().StringVar(&getConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)")
	cmd.Flags().StringVar(&getSortOrder, "order", "", "Order of results; ASCEND or DESCEND (ASCEND by default)")
	cmd.Flags().StringVar(&getSortTarget, "sort-by", "", "Sort target; CREATE, KEY, MODIFY, VALUE, or VERSION")
	cmd.Flags().Int64Var(&getLimit, "limit", 0, "Maximum number of results")
	cmd.Flags().BoolVar(&getPrefix, "prefix", false, "Get keys with matching prefix")
	cmd.Flags().BoolVar(&getFromKey, "from-key", false, "Get keys that are greater than or equal to the given key using byte compare")
	cmd.Flags().Int64Var(&getRev, "rev", 0, "Specify the kv revision")
	cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys")
	cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`)
	return cmd
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/func.go#L39-L41
func (e *Engine) Func(s string, fn RendererFunc) Renderer {
	return Func(s, fn)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/lease/lessor.go#L586-L609
func (le *lessor) revokeExpiredLeases() {
	var ls []*Lease

	// rate limit
	revokeLimit := leaseRevokeRate / 2

	le.mu.RLock()
	if le.isPrimary() {
		ls = le.findExpiredLeases(revokeLimit)
	}
	le.mu.RUnlock()

	if len(ls) != 0 {
		select {
		case <-le.stopC:
			return
		case le.expiredC <- ls:
		default:
			// the receiver of expiredC is probably busy handling
			// other stuff
			// let's try this next time after 500ms
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/netutil/routes_linux.go#L130-L157
func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) { dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family)) if err != nil { return nil, err } msgs, msgErr := syscall.ParseNetlinkMessage(dat) if msgErr != nil { return nil, msgErr } ifaddrmsg := syscall.IfAddrmsg{} for _, m := range msgs { if m.Header.Type != syscall.RTM_NEWADDR { continue } buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg]) if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil { continue } if ifaddrmsg.Index == idx { return &m, nil } } return nil, fmt.Errorf("could not find address for interface index %v", idx) }
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/levels.go#L265-L323
func (s *levelsController) dropPrefix(prefix []byte) error { opt := s.kv.opt for _, l := range s.levels { l.RLock() if l.level == 0 { size := len(l.tables) l.RUnlock() if size > 0 { cp := compactionPriority{ level: 0, score: 1.74, // A unique number greater than 1.0 does two things. Helps identify this // function in logs, and forces a compaction. dropPrefix: prefix, } if err := s.doCompact(cp); err != nil { opt.Warningf("While compacting level 0: %v", err) return nil } } continue } var tables []*table.Table for _, table := range l.tables { var absent bool switch { case bytes.HasPrefix(table.Smallest(), prefix): case bytes.HasPrefix(table.Biggest(), prefix): case bytes.Compare(prefix, table.Smallest()) > 0 && bytes.Compare(prefix, table.Biggest()) < 0: default: absent = true } if !absent { tables = append(tables, table) } } l.RUnlock() if len(tables) == 0 { continue } cd := compactDef{ elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"), thisLevel: l, nextLevel: l, top: []*table.Table{}, bot: tables, dropPrefix: prefix, } if err := s.runCompactDef(l.level, cd); err != nil { opt.Warningf("While running compact def: %+v. Error: %v", cd, err) return err } } return nil }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/buffalo/cmd/fix/npm.go#L19-L87
func PackageJSONCheck(r *Runner) error { fmt.Println("~~~ Checking package.json ~~~") if !r.App.WithWebpack { return nil } box := webpack.Templates f, err := box.FindString("package.json.tmpl") if err != nil { return err } tmpl, err := template.New("package.json").Parse(f) if err != nil { return err } bb := &bytes.Buffer{} err = tmpl.Execute(bb, map[string]interface{}{ "opts": &webpack.Options{ App: r.App, }, }) if err != nil { return err } b, err := ioutil.ReadFile("package.json") if err != nil { return err } if string(b) == bb.String() { return nil } if !ask("Your package.json file is different from the latest Buffalo template.\nWould you like to REPLACE yours with the latest template?") { fmt.Println("\tskipping package.json") return nil } pf, err := os.Create("package.json") if err != nil { return err } _, err = pf.Write(bb.Bytes()) if err != nil { return err } err = pf.Close() if err != nil { return err } os.RemoveAll(filepath.Join(r.App.Root, "node_modules")) var cmd *exec.Cmd if r.App.WithYarn { cmd = exec.Command("yarnpkg", "install") } else { cmd = exec.Command("npm", "install") } cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/serve.go#L206-L219
func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
	if otherHandler == nil {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			grpcServer.ServeHTTP(w, r)
		})
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
		} else {
			otherHandler.ServeHTTP(w, r)
		}
	})
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/status.go#L69-L86
func (s Status) MarshalJSON() ([]byte, error) {
	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)

	if len(s.Progress) == 0 {
		j += "},"
	} else {
		for k, v := range s.Progress {
			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
			j += subj
		}
		// remove the trailing ","
		j = j[:len(j)-1] + "},"
	}

	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
	return []byte(j), nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/defrag_command.go#L32-L41
func NewDefragCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "defrag",
		Short: "Defragments the storage of the etcd members with given endpoints",
		Run:   defragCommandFunc,
	}
	cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list")
	cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Optional. If present, defragments a data directory not in use by etcd.")
	return cmd
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/watcher.go#L79-L87
func (w *watcher) Remove() {
	w.hub.mutex.Lock()
	defer w.hub.mutex.Unlock()

	close(w.eventChan)
	if w.remove != nil {
		w.remove()
	}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/plugins.go#L18-L68
func LoadPlugins() error { var err error oncer.Do("events.LoadPlugins", func() { // don't send plugins events during testing if envy.Get("GO_ENV", "development") == "test" { return } plugs, err := plugins.Available() if err != nil { err = err return } for _, cmds := range plugs { for _, c := range cmds { if c.BuffaloCommand != "events" { continue } err := func(c plugins.Command) error { return safe.RunE(func() error { n := fmt.Sprintf("[PLUGIN] %s %s", c.Binary, c.Name) fn := func(e events.Event) { b, err := json.Marshal(e) if err != nil { fmt.Println("error trying to marshal event", e, err) return } cmd := exec.Command(c.Binary, c.UseCommand, string(b)) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin if err := cmd.Run(); err != nil { fmt.Println("error trying to send event", strings.Join(cmd.Args, " "), err) } } _, err := events.NamedListen(n, events.Filter(c.ListenFor, fn)) if err != nil { return err } return nil }) }(c) if err != nil { err = err return } } } }) return err }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/key_index.go#L224-L253
func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) { if ki.isEmpty() { if lg != nil { lg.Panic( "'compact' got an unexpected empty keyIndex", zap.String("key", string(ki.key)), ) } else { plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) } } genIdx, revIndex := ki.doCompact(atRev, available) g := &ki.generations[genIdx] if !g.isEmpty() { // remove the previous contents. if revIndex != -1 { g.revs = g.revs[revIndex:] } // remove any tombstone if len(g.revs) == 1 && genIdx != len(ki.generations)-1 { delete(available, g.revs[0]) genIdx++ } } // remove the previous generations. ki.generations = ki.generations[genIdx:] }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L248-L257
func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string {
	switch which {
	case CompareIndexNotMatch:
		return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex)
	case CompareValueNotMatch:
		return fmt.Sprintf("[%v != %v]", prevValue, n.Value)
	default:
		return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/watch.go#L732-L826
func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { if ws.closing { panic("created substream goroutine but substream is closing") } // nextRev is the minimum expected next revision nextRev := ws.initReq.rev resuming := false defer func() { if !resuming { ws.closing = true } close(ws.donec) if !resuming { w.closingc <- ws } w.wg.Done() }() emptyWr := &WatchResponse{} for { curWr := emptyWr outc := ws.outc if len(ws.buf) > 0 { curWr = ws.buf[0] } else { outc = nil } select { case outc <- *curWr: if ws.buf[0].Err() != nil { return } ws.buf[0] = nil ws.buf = ws.buf[1:] case wr, ok := <-ws.recvc: if !ok { // shutdown from closeSubstream return } if wr.Created { if ws.initReq.retc != nil { ws.initReq.retc <- ws.outc // to prevent next write from taking the slot in buffered channel // and posting duplicate create events ws.initReq.retc = nil // send first creation event only if requested if ws.initReq.createdNotify { ws.outc <- *wr } // once the watch channel is returned, a current revision // watch must resume at the store revision. This is necessary // for the following case to work as expected: // wch := m1.Watch("a") // m2.Put("a", "b") // <-wch // If the revision is only bound on the first observed event, // if wch is disconnected before the Put is issued, then reconnects // after it is committed, it'll miss the Put. if ws.initReq.rev == 0 { nextRev = wr.Header.Revision } } } else { // current progress of watch; <= store revision nextRev = wr.Header.Revision } if len(wr.Events) > 0 { nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 } ws.initReq.rev = nextRev // created event is already sent above, // watcher should not post duplicate events if wr.Created { continue } // TODO pause channel if buffer gets too large ws.buf = append(ws.buf, wr) case <-w.ctx.Done(): return case <-ws.initReq.ctx.Done(): return case <-resumec: resuming = true return } } // lazily send cancel message if events on missing id }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/proxy/server.go#L295-L415
func (s *server) listenAndServe() { defer s.closeWg.Done() s.lg.Info("proxy is listening on", zap.String("from", s.From())) close(s.readyc) for { s.pauseAcceptMu.Lock() pausec := s.pauseAcceptc s.pauseAcceptMu.Unlock() select { case <-pausec: case <-s.donec: return } s.latencyAcceptMu.RLock() lat := s.latencyAccept s.latencyAcceptMu.RUnlock() if lat > 0 { select { case <-time.After(lat): case <-s.donec: return } } s.listenerMu.RLock() ln := s.listener s.listenerMu.RUnlock() in, err := ln.Accept() if err != nil { select { case s.errc <- err: select { case <-s.donec: return default: } case <-s.donec: return } s.lg.Debug("listener accept error", zap.Error(err)) if strings.HasSuffix(err.Error(), "use of closed network connection") { select { case <-time.After(s.retryInterval): case <-s.donec: return } s.lg.Debug("listener is closed; retry listening on", zap.String("from", s.From())) if err = s.ResetListener(); err != nil { select { case s.errc <- err: select { case <-s.donec: return default: } case <-s.donec: return } s.lg.Warn("failed to reset listener", zap.Error(err)) } } continue } var out net.Conn if !s.tlsInfo.Empty() { var tp *http.Transport tp, err = transport.NewTransport(s.tlsInfo, s.dialTimeout) if err != nil { select { case s.errc <- err: select { case <-s.donec: return default: } case <-s.donec: return } continue } out, err = tp.Dial(s.to.Scheme, s.to.Host) } else { out, err = net.Dial(s.to.Scheme, s.to.Host) } if err != nil { select { case s.errc <- err: select { case <-s.donec: return default: } case <-s.donec: return } s.lg.Debug("failed to dial", zap.Error(err)) continue } go func() { // read incoming bytes from listener, dispatch to outgoing connection s.transmit(out, in) out.Close() in.Close() }() go func() { // read response from outgoing connection, write back to listener s.receive(in, out) in.Close() out.Close() }() } }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/build/validate.go#L24-L63
func ValidateTemplates(walk packd.Walker, tvs []TemplateValidator) genny.RunFn { if len(tvs) == 0 { return func(r *genny.Runner) error { return nil } } return func(r *genny.Runner) error { var errs []string err := packd.SkipWalker(walk, packd.CommonSkipPrefixes, func(path string, file packd.File) error { info, err := file.FileInfo() if err != nil { return err } if info.IsDir() { return nil } f := genny.NewFile(path, file) for _, tv := range tvs { err := safe.Run(func() { if err := tv(f); err != nil { errs = append(errs, fmt.Sprintf("template error in file %s: %s", path, err.Error())) } }) if err != nil { return err } } return nil }) if err != nil { return err } if len(errs) == 0 { return nil } return errors.New(strings.Join(errs, "\n")) } }
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L167-L173
func randomHeight() int {
	h := 1
	for h < maxHeight && rand.Uint32() <= heightIncrease {
		h++
	}
	return h
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/leasing/kv.go#L56-L78
func NewKV(cl *v3.Client, pfx string, opts ...concurrency.SessionOption) (v3.KV, func(), error) { cctx, cancel := context.WithCancel(cl.Ctx()) lkv := &leasingKV{ cl: cl, kv: cl.KV, pfx: pfx, leases: leaseCache{revokes: make(map[string]time.Time)}, ctx: cctx, cancel: cancel, sessionOpts: opts, sessionc: make(chan struct{}), } lkv.wg.Add(2) go func() { defer lkv.wg.Done() lkv.monitorSession() }() go func() { defer lkv.wg.Done() lkv.leases.clearOldRevokes(cctx) }() return lkv, lkv.Close, lkv.waitSession(cctx) }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/mail/mail.go#L33-L37
func New(c buffalo.Context) Message {
	m := NewFromData(c.Data())
	m.Context = c
	return m
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L998-L1018
func (db *DB) RunValueLogGC(discardRatio float64) error {
	if discardRatio >= 1.0 || discardRatio <= 0.0 {
		return ErrInvalidRequest
	}

	// Find head on disk
	headKey := y.KeyWithTs(head, math.MaxUint64)
	// Need to pass with timestamp, lsm get removes the last 8 bytes and compares key
	val, err := db.lc.get(headKey, nil)
	if err != nil {
		return errors.Wrap(err, "Retrieving head from on-disk LSM")
	}

	var head valuePointer
	if len(val.Value) > 0 {
		head.Decode(val.Value)
	}

	// Pick a log file and run GC
	return db.vlog.runGC(discardRatio, head)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/node.go#L438-L445
func (n *node) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
	default:
		n.logger.Warningf("A tick missed to fire. Node blocks too long!")
	}
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/structs.go#L35-L40
func (p valuePointer) Encode(b []byte) []byte {
	binary.BigEndian.PutUint32(b[:4], p.Fid)
	binary.BigEndian.PutUint32(b[4:8], p.Len)
	binary.BigEndian.PutUint32(b[8:12], p.Offset)
	return b[:vptrSize]
}
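A self-contained round-trip sketch of this fixed 12-byte big-endian layout, assuming vptrSize is 12; the struct and Decode below are local illustrations written for this note, not quotes from the badger API.

package main

import (
	"encoding/binary"
	"fmt"
)

type valuePointer struct {
	Fid    uint32
	Len    uint32
	Offset uint32
}

// Encode mirrors the layout above: Fid, Len, Offset as consecutive big-endian uint32s.
func (p valuePointer) Encode(b []byte) []byte {
	binary.BigEndian.PutUint32(b[:4], p.Fid)
	binary.BigEndian.PutUint32(b[4:8], p.Len)
	binary.BigEndian.PutUint32(b[8:12], p.Offset)
	return b[:12]
}

// Decode reads the same three fields back out of the buffer.
func (p *valuePointer) Decode(b []byte) {
	p.Fid = binary.BigEndian.Uint32(b[:4])
	p.Len = binary.BigEndian.Uint32(b[4:8])
	p.Offset = binary.BigEndian.Uint32(b[8:12])
}

func main() {
	in := valuePointer{Fid: 1, Len: 2, Offset: 3}
	buf := in.Encode(make([]byte, 12))
	var out valuePointer
	out.Decode(buf)
	fmt.Println(out == in) // true
}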
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/check.go#L119-L133
func NewCheckPerfCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "perf [options]",
		Short: "Check the performance of the etcd cluster",
		Run:   newCheckPerfCommand,
	}

	// TODO: support customized configuration
	cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "The performance check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge)")
	cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "The prefix for writing the performance check's keys.")
	cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
	cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")

	return cmd
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/levels.go#L420-L624
func (s *levelsController) compactBuildTables( lev int, cd compactDef) ([]*table.Table, func() error, error) { topTables := cd.top botTables := cd.bot var hasOverlap bool { kr := getKeyRange(cd.top) for i, lh := range s.levels { if i <= lev { // Skip upper levels. continue } lh.RLock() left, right := lh.overlappingTables(levelHandlerRLocked{}, kr) lh.RUnlock() if right-left > 0 { hasOverlap = true break } } } // Try to collect stats so that we can inform value log about GC. That would help us find which // value log file should be GCed. discardStats := make(map[uint32]int64) updateStats := func(vs y.ValueStruct) { if vs.Meta&bitValuePointer > 0 { var vp valuePointer vp.Decode(vs.Value) discardStats[vp.Fid] += int64(vp.Len) } } // Create iterators across all the tables involved first. var iters []y.Iterator if lev == 0 { iters = appendIteratorsReversed(iters, topTables, false) } else if len(topTables) > 0 { y.AssertTrue(len(topTables) == 1) iters = []y.Iterator{topTables[0].NewIterator(false)} } // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap. var valid []*table.Table for _, table := range botTables { if len(cd.dropPrefix) > 0 && bytes.HasPrefix(table.Smallest(), cd.dropPrefix) && bytes.HasPrefix(table.Biggest(), cd.dropPrefix) { // All the keys in this table have the dropPrefix. So, this table does not need to be // in the iterator and can be dropped immediately. continue } valid = append(valid, table) } iters = append(iters, table.NewConcatIterator(valid, false)) it := y.NewMergeIterator(iters, false) defer it.Close() // Important to close the iterator to do ref counting. it.Rewind() // Pick a discard ts, so we can discard versions below this ts. We should // never discard any versions starting from above this timestamp, because // that would affect the snapshot view guarantee provided by transactions. discardTs := s.kv.orc.discardAtOrBelow() // Start generating new tables. type newTableResult struct { table *table.Table err error } resultCh := make(chan newTableResult) var numBuilds, numVersions int var lastKey, skipKey []byte for it.Valid() { timeStart := time.Now() builder := table.NewTableBuilder() var numKeys, numSkips uint64 for ; it.Valid(); it.Next() { // See if we need to skip the prefix. if len(cd.dropPrefix) > 0 && bytes.HasPrefix(it.Key(), cd.dropPrefix) { numSkips++ updateStats(it.Value()) continue } // See if we need to skip this key. if len(skipKey) > 0 { if y.SameKey(it.Key(), skipKey) { numSkips++ updateStats(it.Value()) continue } else { skipKey = skipKey[:0] } } if !y.SameKey(it.Key(), lastKey) { if builder.ReachedCapacity(s.kv.opt.MaxTableSize) { // Only break if we are on a different key, and have reached capacity. We want // to ensure that all versions of the key are stored in the same sstable, and // not divided across multiple tables at the same level. break } lastKey = y.SafeCopy(lastKey, it.Key()) numVersions = 0 } vs := it.Value() version := y.ParseTs(it.Key()) if version <= discardTs { // Keep track of the number of versions encountered for this key. Only consider the // versions which are below the minReadTs, otherwise, we might end up discarding the // only valid version for a running transaction. numVersions++ lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) || numVersions > s.kv.opt.NumVersionsToKeep || lastValidVersion { // If this version of the key is deleted or expired, skip all the rest of the // versions. Ensure that we're only removing versions below readTs. 
skipKey = y.SafeCopy(skipKey, it.Key()) if lastValidVersion { // Add this key. We have set skipKey, so the following key versions // would be skipped. } else if hasOverlap { // If this key range has overlap with lower levels, then keep the deletion // marker with the latest version, discarding the rest. We have set skipKey, // so the following key versions would be skipped. } else { // If no overlap, we can skip all the versions, by continuing here. numSkips++ updateStats(vs) continue // Skip adding this key. } } } numKeys++ y.Check(builder.Add(it.Key(), it.Value())) } // It was true that it.Valid() at least once in the loop above, which means we // called Add() at least once, and builder is not Empty(). s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v", numKeys, numSkips, time.Since(timeStart)) if !builder.Empty() { numBuilds++ fileID := s.reserveFileID() go func(builder *table.Builder) { defer builder.Close() fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true) if err != nil { resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)} return } if _, err := fd.Write(builder.Finish()); err != nil { resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)} return } tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil) // decrRef is added below. resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())} }(builder) } } newTables := make([]*table.Table, 0, 20) // Wait for all table builders to finish. var firstErr error for x := 0; x < numBuilds; x++ { res := <-resultCh newTables = append(newTables, res.table) if firstErr == nil { firstErr = res.err } } if firstErr == nil { // Ensure created files' directory entries are visible. We don't mind the extra latency // from not doing this ASAP after all file creation has finished because this is a // background operation. firstErr = syncDir(s.kv.opt.Dir) } if firstErr != nil { // An error happened. Delete all the newly created table files (by calling DecrRef // -- we're the only holders of a ref). for j := 0; j < numBuilds; j++ { if newTables[j] != nil { newTables[j].DecrRef() } } errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd) return nil, nil, errorReturn } sort.Slice(newTables, func(i, j int) bool { return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0 }) s.kv.vlog.updateDiscardStats(discardStats) s.kv.opt.Debugf("Discard stats: %v", discardStats) return newTables, func() error { return decrRefs(newTables) }, nil }
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/config.go#L222-L265
func ValidateConfig(config *Config) error { // We don't actually support running as 0 in the library any more, but // we do understand it. protocolMin := ProtocolVersionMin if protocolMin == 0 { protocolMin = 1 } if config.ProtocolVersion < protocolMin || config.ProtocolVersion > ProtocolVersionMax { return fmt.Errorf("Protocol version %d must be >= %d and <= %d", config.ProtocolVersion, protocolMin, ProtocolVersionMax) } if len(config.LocalID) == 0 { return fmt.Errorf("LocalID cannot be empty") } if config.HeartbeatTimeout < 5*time.Millisecond { return fmt.Errorf("Heartbeat timeout is too low") } if config.ElectionTimeout < 5*time.Millisecond { return fmt.Errorf("Election timeout is too low") } if config.CommitTimeout < time.Millisecond { return fmt.Errorf("Commit timeout is too low") } if config.MaxAppendEntries <= 0 { return fmt.Errorf("MaxAppendEntries must be positive") } if config.MaxAppendEntries > 1024 { return fmt.Errorf("MaxAppendEntries is too large") } if config.SnapshotInterval < 5*time.Millisecond { return fmt.Errorf("Snapshot interval is too low") } if config.LeaderLeaseTimeout < 5*time.Millisecond { return fmt.Errorf("Leader lease timeout is too low") } if config.LeaderLeaseTimeout > config.HeartbeatTimeout { return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout") } if config.ElectionTimeout < config.HeartbeatTimeout { return fmt.Errorf("Election timeout must be equal or greater than Heartbeat Timeout") } return nil }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L318-L320
func (f *FakeClient) FindIssues(query, sort string, asc bool) ([]github.Issue, error) {
	return f.Issues, nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L325-L328
func (txn *Txn) SetWithMeta(key, val []byte, meta byte) error {
	e := &Entry{Key: key, Value: val, UserMeta: meta}
	return txn.SetEntry(e)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/client/clientset/versioned/typed/prowjobs/v1/fake/fake_prowjob.go#L132-L140
func (c *FakeProwJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *prowjobsv1.ProwJob, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(prowjobsResource, c.ns, name, data, subresources...), &prowjobsv1.ProwJob{})

	if obj == nil {
		return nil, err
	}
	return obj.(*prowjobsv1.ProwJob), err
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/alarm_command.go#L47-L58
func alarmDisarmCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("alarm disarm command accepts no arguments"))
	}
	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).AlarmDisarm(ctx, &v3.AlarmMember{})
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	display.Alarm(*resp)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L178-L218
func (ivt *IntervalTree) Delete(ivl Interval) bool { z := ivt.find(ivl) if z == nil { return false } y := z if z.left != nil && z.right != nil { y = z.successor() } x := y.left if x == nil { x = y.right } if x != nil { x.parent = y.parent } if y.parent == nil { ivt.root = x } else { if y == y.parent.left { y.parent.left = x } else { y.parent.right = x } y.parent.updateMax() } if y != z { z.iv = y.iv z.updateMax() } if y.color() == black && x != nil { ivt.deleteFixup(x) } ivt.count-- return true }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2stats/queue.go#L103-L110
func (q *statsQueue) Clear() {
	q.rwl.Lock()
	defer q.rwl.Unlock()
	q.back = -1
	q.front = 0
	q.size = 0
	q.totalReqSize = 0
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/default_context.go#L178-L180
func (d *DefaultContext) LogFields(values map[string]interface{}) {
	d.logger = d.logger.WithFields(values)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/stream.go#L276-L325
func (st *Stream) Orchestrate(ctx context.Context) error { st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists. // kvChan should only have a small capacity to ensure that we don't buffer up too much data if // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each // KVList. To get around 64MB buffer, we can set the channel size to 16. st.kvChan = make(chan *pb.KVList, 16) if st.KeyToList == nil { st.KeyToList = st.ToList } // Picks up ranges from Badger, and sends them to rangeCh. go st.produceRanges(ctx) errCh := make(chan error, 1) // Stores error by consumeKeys. var wg sync.WaitGroup for i := 0; i < st.NumGo; i++ { wg.Add(1) go func() { defer wg.Done() // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan. if err := st.produceKVs(ctx); err != nil { select { case errCh <- err: default: } } }() } // Pick up key-values from kvChan and send to stream. kvErr := make(chan error, 1) go func() { // Picks up KV lists from kvChan, and sends them to Output. kvErr <- st.streamKVs(ctx) }() wg.Wait() // Wait for produceKVs to be over. close(st.kvChan) // Now we can close kvChan. select { case err := <-errCh: // Check error from produceKVs. return err default: } // Wait for key streaming to be over. err := <-kvErr return err }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/storage.go#L108-L125
func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
	ms.Lock()
	defer ms.Unlock()
	offset := ms.ents[0].Index
	if lo <= offset {
		return nil, ErrCompacted
	}
	if hi > ms.lastIndex()+1 {
		raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
	}
	// only contains dummy entries.
	if len(ms.ents) == 1 {
		return nil, ErrUnavailable
	}

	ents := ms.ents[lo-offset : hi-offset]
	return limitSize(ents, maxSize), nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/client/keys.go#L92-L97
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
	return &httpKeysAPI{
		client: c,
		prefix: p,
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/config.go#L195-L200
func (c *ServerConfig) hasLocalMember() error {
	if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
		return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
	}
	return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L686-L706
func (n *netPipeline) decodeResponses() {
	timeout := n.trans.timeout
	for {
		select {
		case future := <-n.inprogressCh:
			if timeout > 0 {
				n.conn.conn.SetReadDeadline(time.Now().Add(timeout))
			}

			_, err := decodeResponse(n.conn, future.resp)
			future.respond(err)
			select {
			case n.doneCh <- future:
			case <-n.shutdownCh:
				return
			}
		case <-n.shutdownCh:
			return
		}
	}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/cookies.go#L63-L73
func (c *Cookies) Delete(name string) {
	ck := http.Cookie{
		Name:  name,
		Value: "v",
		// Setting a time in the distant past, like the unix epoch, removes the cookie,
		// since it has long expired.
		Expires: time.Unix(0, 0),
	}
	http.SetCookie(c.res, &ck)
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/servers/listener.go#L29-L38
func UnixSocket(addr string) (*Listener, error) {
	listener, err := net.Listen("unix", addr)
	if err != nil {
		return nil, err
	}
	return &Listener{
		Server:   &http.Server{},
		Listener: listener,
	}, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L340-L355
func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
	indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex
	valueMatch := prevValue == "" || n.Value == prevValue
	ok = valueMatch && indexMatch
	switch {
	case valueMatch && indexMatch:
		which = CompareMatch
	case indexMatch && !valueMatch:
		which = CompareValueNotMatch
	case valueMatch && !indexMatch:
		which = CompareIndexNotMatch
	default:
		which = CompareNotMatch
	}
	return ok, which
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/mail/internal/mail/smtp.go#L195-L203
func (d *Dialer) DialAndSend(m ...*Message) error {
	s, err := d.Dial()
	if err != nil {
		return err
	}
	defer s.Close()

	return Send(s, m...)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/agent/server.go#L129-L169
func (srv *Server) Transport(stream rpcpb.Transport_TransportServer) (err error) { errc := make(chan error) go func() { for { var req *rpcpb.Request req, err = stream.Recv() if err != nil { errc <- err // TODO: handle error and retry return } if req.Member != nil { srv.Member = req.Member } if req.Tester != nil { srv.Tester = req.Tester } var resp *rpcpb.Response resp, err = srv.handleTesterRequest(req) if err != nil { errc <- err // TODO: handle error and retry return } if err = stream.Send(resp); err != nil { errc <- err // TODO: handle error and retry return } } }() select { case err = <-errc: case <-stream.Context().Done(): err = stream.Context().Err() } return err }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/mutex.go#L42-L78
func (m *Mutex) Lock(ctx context.Context) error { s := m.s client := m.s.Client() m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) // put self in lock waiters via myKey; oldest waiter holds lock put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) // reuse key in case this session already holds the lock get := v3.OpGet(m.myKey) // fetch current holder to complete uncontended path with only one RPC getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() if err != nil { return err } m.myRev = resp.Header.Revision if !resp.Succeeded { m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision } // if no key on prefix / the minimum rev is key, already hold the lock ownerKey := resp.Responses[1].GetResponseRange().Kvs if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { m.hdr = resp.Header return nil } // wait for deletion revisions prior to myKey hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) // release lock key if wait failed if werr != nil { m.Unlock(client.Ctx()) } else { m.hdr = hdr } return werr }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/serve.go#L385-L390
func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler {
	return &corsHandler{
		ac: &etcdserver.AccessController{CORS: cors},
		h:  h,
	}
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L580-L590
func (r *Raft) BootstrapCluster(configuration Configuration) Future {
	bootstrapReq := &bootstrapFuture{}
	bootstrapReq.init()
	bootstrapReq.configuration = configuration
	select {
	case <-r.shutdownCh:
		return errorFuture{ErrRaftShutdown}
	case r.bootstrapCh <- bootstrapReq:
		return bootstrapReq
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/config.go#L268-L272
func (c *ServerConfig) ReqTimeout() time.Duration {
	// 5s for queue waiting, computation and disk IO delay
	// + 2 * election timeout for possible leader election
	return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
}
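A worked example of the arithmetic above. The tick values are assumptions (roughly etcd's common defaults of a 100ms tick and a 1s election timeout), not read from this config: with TickMs=100 and ElectionTicks=10, ReqTimeout comes out to 5s + 2*1s = 7s.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed configuration: 100ms tick, 10 ticks per election timeout.
	tickMs := uint(100)
	electionTicks := 10

	reqTimeout := 5*time.Second + 2*time.Duration(electionTicks*int(tickMs))*time.Millisecond
	fmt.Println(reqTimeout) // 7s
}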
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/client/members.go#L291-L294
func v2MembersURL(ep url.URL) *url.URL {
	ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
	return &ep
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L152-L160
func (f *FakeClient) CreateReview(org, repo string, number int, r github.DraftReview) error {
	f.Reviews[number] = append(f.Reviews[number], github.Review{
		ID:   f.ReviewID,
		User: github.User{Login: botName},
		Body: r.Body,
	})
	f.ReviewID++
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2auth/auth.go#L576-L597
func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
	var out RWPermission
	currentRead := types.NewUnsafeSet(rw.Read...)
	for _, r := range n.Read {
		if currentRead.Contains(r) {
			return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
		}
		currentRead.Add(r)
	}

	currentWrite := types.NewUnsafeSet(rw.Write...)
	for _, w := range n.Write {
		if currentWrite.Contains(w) {
			return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w)
		}
		currentWrite.Add(w)
	}

	out.Read = currentRead.Values()
	out.Write = currentWrite.Values()
	sort.Strings(out.Read)
	sort.Strings(out.Write)
	return out, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/agent/server.go#L84-L119
func (srv *Server) StartServe() error {
	var err error
	srv.ln, err = net.Listen(srv.network, srv.address)
	if err != nil {
		return err
	}

	var opts []grpc.ServerOption
	opts = append(opts, grpc.MaxRecvMsgSize(int(maxRequestBytes+grpcOverheadBytes)))
	opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
	opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
	srv.grpcServer = grpc.NewServer(opts...)

	rpcpb.RegisterTransportServer(srv.grpcServer, srv)

	srv.lg.Info(
		"gRPC server started",
		zap.String("address", srv.address),
		zap.String("listener-address", srv.ln.Addr().String()),
	)
	err = srv.grpcServer.Serve(srv.ln)
	if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
		srv.lg.Info(
			"gRPC server is shut down",
			zap.String("address", srv.address),
			zap.Error(err),
		)
	} else {
		srv.lg.Warn(
			"gRPC server returned with error",
			zap.String("address", srv.address),
			zap.Error(err),
		)
	}
	return err
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/dir_unix.go#L83-L97
func (guard *directoryLockGuard) release() error {
	var err error
	if !guard.readOnly {
		// It's important that we remove the pid file first.
		err = os.Remove(guard.path)
	}

	if closeErr := guard.f.Close(); err == nil {
		err = closeErr
	}
	guard.path = ""
	guard.f = nil

	return err
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/iterator.go#L161-L180
func (s *MergeIterator) initHeap() {
	s.h = s.h[:0]
	for idx, itr := range s.all {
		if !itr.Valid() {
			continue
		}
		e := &elem{itr: itr, nice: idx, reversed: s.reversed}
		s.h = append(s.h, e)
	}
	heap.Init(&s.h)
	for len(s.h) > 0 {
		it := s.h[0].itr
		if it == nil || !it.Valid() {
			heap.Pop(&s.h)
			continue
		}
		s.storeKey(s.h[0].itr)
		break
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L122-L157
func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) {
	var err *v2error.Error

	s.worldLock.RLock()
	defer s.worldLock.RUnlock()

	defer func() {
		if err == nil {
			s.Stats.Inc(GetSuccess)
			if recursive {
				reportReadSuccess(GetRecursive)
			} else {
				reportReadSuccess(Get)
			}
			return
		}

		s.Stats.Inc(GetFail)
		if recursive {
			reportReadFailure(GetRecursive)
		} else {
			reportReadFailure(Get)
		}
	}()

	n, err := s.internalGet(nodePath)
	if err != nil {
		return nil, err
	}

	e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
	e.EtcdIndex = s.CurrentIndex
	e.Node.loadInternalNode(n, recursive, sorted, s.clock)

	return e, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L708-L716
func (r *Raft) quorumSize() int {
	voters := 0
	for _, server := range r.configurations.latest.Servers {
		if server.Suffrage == Voter {
			voters++
		}
	}
	return voters/2 + 1
}
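The integer division above yields a strict majority of the voting servers. A small standalone sketch (not library code) tabulates the same arithmetic for a few hypothetical cluster sizes.

package main

import "fmt"

// quorum mirrors the voters/2 + 1 calculation from quorumSize.
func quorum(voters int) int { return voters/2 + 1 }

func main() {
	for _, v := range []int{1, 2, 3, 4, 5} {
		fmt.Printf("%d voters -> quorum %d\n", v, quorum(v)) // 1->1, 2->2, 3->2, 4->3, 5->3
	}
}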
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L452-L455
func (n *NetworkTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
	address := n.getProviderAddressOrFallback(id, p)
	return []byte(address)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/logger.go#L42-L47
func (opt *Options) Infof(format string, v ...interface{}) {
	if opt.Logger == nil {
		return
	}
	opt.Logger.Infof(format, v...)
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/method_override.go#L15-L21
func MethodOverride(res http.ResponseWriter, req *http.Request) {
	if req.Method == "POST" {
		req.Method = defaults.String(req.FormValue("_method"), "POST")
		req.Form.Del("_method")
		req.PostForm.Del("_method")
	}
}
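A test-style sketch of the override in isolation, under the assumption that the function is exported from the buffalo package root as buffalo.MethodOverride; the route and form body are made up. A POST whose form carries _method=PUT is rewritten to PUT before routing sees it.

package main

import (
	"fmt"
	"net/http/httptest"
	"strings"

	"github.com/gobuffalo/buffalo"
)

func main() {
	// Hypothetical route; the body carries the override field.
	body := strings.NewReader("_method=PUT")
	req := httptest.NewRequest("POST", "/widgets/1", body)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	buffalo.MethodOverride(httptest.NewRecorder(), req)
	fmt.Println(req.Method) // expected: PUT
}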
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L191-L245
func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) {
	var err *v2error.Error

	s.worldLock.Lock()
	defer s.worldLock.Unlock()

	defer func() {
		if err == nil {
			s.Stats.Inc(SetSuccess)
			reportWriteSuccess(Set)
			return
		}

		s.Stats.Inc(SetFail)
		reportWriteFailure(Set)
	}()

	// Get prevNode value
	n, getErr := s.internalGet(nodePath)
	if getErr != nil && getErr.ErrorCode != v2error.EcodeKeyNotFound {
		err = getErr
		return nil, err
	}

	if expireOpts.Refresh {
		if getErr != nil {
			err = getErr
			return nil, err
		}
		value = n.Value
	}

	// Set new value
	e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set)
	if err != nil {
		return nil, err
	}
	e.EtcdIndex = s.CurrentIndex

	// Put prevNode into event
	if getErr == nil {
		prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
		prev.Node.loadInternalNode(n, false, false, s.clock)
		e.PrevNode = prev.Node
	}

	if !expireOpts.Refresh {
		s.WatcherHub.notify(e)
	} else {
		e.SetRefresh()
		s.WatcherHub.add(e)
	}

	return e, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L1013-L1144
func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) {
	defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now())
	// Setup a response
	resp := &AppendEntriesResponse{
		RPCHeader:      r.getRPCHeader(),
		Term:           r.getCurrentTerm(),
		LastLog:        r.getLastIndex(),
		Success:        false,
		NoRetryBackoff: false,
	}
	var rpcErr error
	defer func() {
		rpc.Respond(resp, rpcErr)
	}()

	// Ignore an older term
	if a.Term < r.getCurrentTerm() {
		return
	}

	// Increase the term if we see a newer one, also transition to follower
	// if we ever get an appendEntries call
	if a.Term > r.getCurrentTerm() || r.getState() != Follower {
		// Ensure transition to follower
		r.setState(Follower)
		r.setCurrentTerm(a.Term)
		resp.Term = a.Term
	}

	// Save the current leader
	r.setLeader(ServerAddress(r.trans.DecodePeer(a.Leader)))

	// Verify the last log entry
	if a.PrevLogEntry > 0 {
		lastIdx, lastTerm := r.getLastEntry()

		var prevLogTerm uint64
		if a.PrevLogEntry == lastIdx {
			prevLogTerm = lastTerm
		} else {
			var prevLog Log
			if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil {
				r.logger.Warn(fmt.Sprintf("Failed to get previous log: %d %v (last: %d)",
					a.PrevLogEntry, err, lastIdx))
				resp.NoRetryBackoff = true
				return
			}
			prevLogTerm = prevLog.Term
		}

		if a.PrevLogTerm != prevLogTerm {
			r.logger.Warn(fmt.Sprintf("Previous log term mis-match: ours: %d remote: %d",
				prevLogTerm, a.PrevLogTerm))
			resp.NoRetryBackoff = true
			return
		}
	}

	// Process any new entries
	if len(a.Entries) > 0 {
		start := time.Now()

		// Delete any conflicting entries, skip any duplicates
		lastLogIdx, _ := r.getLastLog()
		var newEntries []*Log
		for i, entry := range a.Entries {
			if entry.Index > lastLogIdx {
				newEntries = a.Entries[i:]
				break
			}
			var storeEntry Log
			if err := r.logs.GetLog(entry.Index, &storeEntry); err != nil {
				r.logger.Warn(fmt.Sprintf("Failed to get log entry %d: %v", entry.Index, err))
				return
			}
			if entry.Term != storeEntry.Term {
				r.logger.Warn(fmt.Sprintf("Clearing log suffix from %d to %d", entry.Index, lastLogIdx))
				if err := r.logs.DeleteRange(entry.Index, lastLogIdx); err != nil {
					r.logger.Error(fmt.Sprintf("Failed to clear log suffix: %v", err))
					return
				}
				if entry.Index <= r.configurations.latestIndex {
					r.configurations.latest = r.configurations.committed
					r.configurations.latestIndex = r.configurations.committedIndex
				}
				newEntries = a.Entries[i:]
				break
			}
		}

		if n := len(newEntries); n > 0 {
			// Append the new entries
			if err := r.logs.StoreLogs(newEntries); err != nil {
				r.logger.Error(fmt.Sprintf("Failed to append to logs: %v", err))
				// TODO: leaving r.getLastLog() in the wrong
				// state if there was a truncation above
				return
			}

			// Handle any new configuration changes
			for _, newEntry := range newEntries {
				r.processConfigurationLogEntry(newEntry)
			}

			// Update the lastLog
			last := newEntries[n-1]
			r.setLastLog(last.Index, last.Term)
		}

		metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start)
	}

	// Update the commit index
	if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() {
		start := time.Now()
		idx := min(a.LeaderCommitIndex, r.getLastIndex())
		r.setCommitIndex(idx)
		if r.configurations.latestIndex <= idx {
			r.configurations.committed = r.configurations.latest
			r.configurations.committedIndex = r.configurations.latestIndex
		}
		r.processLogs(idx, nil)
		metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start)
	}

	// Everything went well, set success
	resp.Success = true
	r.setLastContact()
	return
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L186-L195
func (rn *RawNode) Step(m pb.Message) error {
	// ignore unexpected local messages receiving over network
	if IsLocalMsg(m.Type) {
		return ErrStepLocalMsg
	}
	if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
		return rn.raft.Step(m)
	}
	return ErrStepPeerNotFound
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/commitment.go#L74-L81
func (c *commitment) match(server ServerID, matchIndex uint64) {
	c.Lock()
	defer c.Unlock()
	if prev, hasVote := c.matchIndexes[server]; hasVote && matchIndex > prev {
		c.matchIndexes[server] = matchIndex
		c.recalculate()
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/artifact-uploader/main.go#L91-L95
func (o *Options) AddFlags(flags *flag.FlagSet) {
	flags.IntVar(&o.NumWorkers, "num-workers", 25, "Number of threads to use for processing updates.")
	flags.StringVar(&o.ProwJobNamespace, "prow-job-ns", "", "Namespace containing ProwJobs.")
	o.Options.AddFlags(flags)
}