https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/external-plugins/cherrypicker/server.go#L58-L71
func HelpProvider(enabledRepos []string) (*pluginhelp.PluginHelp, error) {
	pluginHelp := &pluginhelp.PluginHelp{
		Description: `The cherrypick plugin is used for cherrypicking PRs across branches. For every successful cherrypick invocation a new PR is opened against the target branch and assigned to the requester. If the parent PR contains a release note, it is copied to the cherrypick PR.`,
	}
	pluginHelp.AddCommand(pluginhelp.Command{
		Usage:       "/cherrypick [branch]",
		Description: "Cherrypick a PR to a different branch. This command works both in merged PRs (the cherrypick PR is opened immediately) and open PRs (the cherrypick PR opens as soon as the original PR merges).",
		Featured:    true,
		// depends on how the cherrypick server runs; needs auth by default (--allow-all=false)
		WhoCanUse: "Members of the trusted organization for the repo.",
		Examples:  []string{"/cherrypick release-3.9"},
	})
	return pluginHelp, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/unique_strings.go#L32-L38
func (us *UniqueStringsValue) Set(s string) error {
	us.Values = make(map[string]struct{})
	for _, v := range strings.Split(s, ",") {
		us.Values[v] = struct{}{}
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/lease_command.go#L88-L101
func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("lease revoke command needs 1 argument"))
	}
	id := leaseFromArgs(args[0])
	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).Revoke(ctx, id)
	cancel()
	if err != nil {
		ExitWithError(ExitError, fmt.Errorf("failed to revoke lease (%v)", err))
	}
	display.Revoke(id, *resp)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/mmap_unix.go#L31-L37
func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
	mtype := unix.PROT_READ
	if writable {
		mtype |= unix.PROT_WRITE
	}
	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/githuboauth/githuboauth.go#L119-L140
func (ga *Agent) HandleLogout(client OAuthClient) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		accessTokenSession, err := ga.gc.CookieStore.Get(r, tokenSession)
		if err != nil {
			ga.serverError(w, "get cookie", err)
			return
		}
		// Clear session
		accessTokenSession.Options.MaxAge = -1
		if err := accessTokenSession.Save(r, w); err != nil {
			ga.serverError(w, "Save invalidated session on log out", err)
			return
		}
		loginCookie, err := r.Cookie(loginSession)
		if err == nil {
			loginCookie.MaxAge = -1
			loginCookie.Expires = time.Now().Add(-time.Hour * 24)
			http.SetCookie(w, loginCookie)
		}
		http.Redirect(w, r, ga.gc.FinalRedirectURL, http.StatusFound)
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/requiresig/requiresig.go#L146-L179
func handle(log *logrus.Entry, ghc githubClient, cp commentPruner, ie *github.IssueEvent, mentionRe *regexp.Regexp) error {
	// Ignore PRs, closed issues, and events that aren't new issues or sig label
	// changes.
	if !shouldReact(mentionRe, ie) {
		return nil
	}

	org := ie.Repo.Owner.Login
	repo := ie.Repo.Name
	number := ie.Issue.Number

	hasSigLabel := hasSigLabel(ie.Issue.Labels)
	hasNeedsSigLabel := github.HasLabel(labels.NeedsSig, ie.Issue.Labels)

	if hasSigLabel && hasNeedsSigLabel {
		if err := ghc.RemoveLabel(org, repo, number, labels.NeedsSig); err != nil {
			log.WithError(err).Errorf("Failed to remove %s label.", labels.NeedsSig)
		}
		botName, err := ghc.BotName()
		if err != nil {
			return fmt.Errorf("error getting bot name: %v", err)
		}
		cp.PruneComments(shouldPrune(log, botName))
	} else if !hasSigLabel && !hasNeedsSigLabel {
		if err := ghc.AddLabel(org, repo, number, labels.NeedsSig); err != nil {
			log.WithError(err).Errorf("Failed to add %s label.", labels.NeedsSig)
		}
		msg := plugins.FormatResponse(ie.Issue.User.Login, needsSIGMessage, needsSIGDetails)
		if err := ghc.CreateComment(org, repo, number, msg); err != nil {
			log.WithError(err).Error("Failed to create comment.")
		}
	}

	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/watch_command.go#L46-L59
func NewWatchCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "watch [options] [key or prefix] [range_end] [--] [exec-command arg1 arg2 ...]",
		Short: "Watches events stream on keys or prefixes",
		Run:   watchCommandFunc,
	}

	cmd.Flags().BoolVarP(&watchInteractive, "interactive", "i", false, "Interactive mode")
	cmd.Flags().BoolVar(&watchPrefix, "prefix", false, "Watch on a prefix if prefix is set")
	cmd.Flags().Int64Var(&watchRev, "rev", 0, "Revision to start watching")
	cmd.Flags().BoolVar(&watchPrevKey, "prev-kv", false, "get the previous key-value pair before the event happens")

	return cmd
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/agent/handler.go#L140-L175
func (srv *Server) runEtcd() error {
	errc := make(chan error)
	go func() {
		time.Sleep(5 * time.Second)
		// server advertise client/peer listener had to start first
		// before setting up proxy listener
		errc <- srv.startProxy()
	}()

	if srv.etcdCmd != nil {
		srv.lg.Info(
			"starting etcd command",
			zap.String("command-path", srv.etcdCmd.Path),
		)
		err := srv.etcdCmd.Start()
		perr := <-errc
		srv.lg.Info(
			"started etcd command",
			zap.String("command-path", srv.etcdCmd.Path),
			zap.Errors("errors", []error{err, perr}),
		)
		if err != nil {
			return err
		}
		return perr
	}

	select {
	case <-srv.etcdServer.Server.ReadyNotify():
		srv.lg.Info("embedded etcd is ready")
	case <-time.After(time.Minute):
		srv.etcdServer.Close()
		return fmt.Errorf("took too long to start %v", <-srv.etcdServer.Err())
	}
	return <-errc
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L164-L175
func (c *Client) Sync(ctx context.Context) error {
	mresp, err := c.MemberList(ctx)
	if err != nil {
		return err
	}
	var eps []string
	for _, m := range mresp.Members {
		eps = append(eps, m.ClientURLs...)
	}
	c.SetEndpoints(eps...)
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3compactor/periodic.go#L206-L210
func (pc *Periodic) Pause() {
	pc.mu.Lock()
	pc.paused = true
	pc.mu.Unlock()
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/transform/plugins/events.go#L100-L102
func (l LabelEvent) Match(eventName, label string) bool {
	return eventName == "labeled" && label == l.Label
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/http.go#L78-L86
func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler {
	return &pipelineHandler{
		lg:      t.Logger,
		localID: t.ID,
		tr:      t,
		r:       r,
		cid:     cid,
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/boskos/mason/mason.go#L513-L520
func (m *Mason) UpdateConfigs(storagePath string) error {
	configs, err := ParseConfig(storagePath)
	if err != nil {
		logrus.WithError(err).Error("unable to parse config")
		return err
	}
	return m.storage.SyncConfigs(configs)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/ghproxy/ghcache/ghcache.go#L176-L185
func NewDiskCache(delegate http.RoundTripper, cacheDir string, cacheSizeGB, maxConcurrency int) http.RoundTripper {
	return NewFromCache(delegate,
		diskcache.NewWithDiskv(
			diskv.New(diskv.Options{
				BasePath:     path.Join(cacheDir, "data"),
				TempDir:      path.Join(cacheDir, "temp"),
				CacheSizeMax: uint64(cacheSizeGB) * uint64(1000000000), // convert G to B
			})),
		maxConcurrency,
	)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/pipeline/controller.go#L349-L442
func reconcile(c reconciler, key string) error {
	logrus.Debugf("reconcile: %s\n", key)

	ctx, namespace, name, err := fromKey(key)
	if err != nil {
		runtime.HandleError(err)
		return nil
	}

	var wantPipelineRun bool
	pj, err := c.getProwJob(name)
	switch {
	case apierrors.IsNotFound(err):
		// Do not want pipeline
	case err != nil:
		return fmt.Errorf("get prowjob: %v", err)
	case pj.Spec.Agent != prowjobv1.TektonAgent:
		// Do not want a pipeline for this job
	case pjutil.ClusterToCtx(pj.Spec.Cluster) != ctx:
		// Build is in wrong cluster, we do not want this build
		logrus.Warnf("%s found in context %s not %s", key, ctx, pjutil.ClusterToCtx(pj.Spec.Cluster))
	case pj.DeletionTimestamp == nil:
		wantPipelineRun = true
	}

	var havePipelineRun bool
	p, err := c.getPipelineRun(ctx, namespace, name)
	switch {
	case apierrors.IsNotFound(err):
		// Do not have a pipeline
	case err != nil:
		return fmt.Errorf("get pipelinerun %s: %v", key, err)
	case p.DeletionTimestamp == nil:
		havePipelineRun = true
	}

	var newPipelineRun bool
	switch {
	case !wantPipelineRun:
		if !havePipelineRun {
			if pj != nil && pj.Spec.Agent == prowjobv1.TektonAgent {
				logrus.Infof("Observed deleted: %s", key)
			}
			return nil
		}

		// Skip deleting if the pipeline run is not created by prow
		switch v, ok := p.Labels[kube.CreatedByProw]; {
		case !ok, v != "true":
			return nil
		}

		logrus.Infof("Delete PipelineRun/%s", key)
		if err = c.deletePipelineRun(ctx, namespace, name); err != nil {
			return fmt.Errorf("delete pipelinerun: %v", err)
		}
		return nil
	case finalState(pj.Status.State):
		logrus.Infof("Observed finished: %s", key)
		return nil
	case wantPipelineRun && pj.Spec.PipelineRunSpec == nil:
		return fmt.Errorf("nil PipelineRunSpec in ProwJob/%s", key)
	case wantPipelineRun && !havePipelineRun:
		id, url, err := c.pipelineID(*pj)
		if err != nil {
			return fmt.Errorf("failed to get pipeline id: %v", err)
		}
		pj.Status.BuildID = id
		pj.Status.URL = url
		newPipelineRun = true
		pr := makePipelineGitResource(*pj)
		logrus.Infof("Create PipelineResource/%s", key)
		if pr, err = c.createPipelineResource(ctx, namespace, pr); err != nil {
			return fmt.Errorf("create PipelineResource/%s: %v", key, err)
		}
		newp, err := makePipelineRun(*pj, pr)
		if err != nil {
			return fmt.Errorf("make PipelineRun/%s: %v", key, err)
		}
		logrus.Infof("Create PipelineRun/%s", key)
		p, err = c.createPipelineRun(ctx, namespace, newp)
		if err != nil {
			jerr := fmt.Errorf("start pipeline: %v", err)
			// Set the prow job in error state to avoid an endless loop when
			// the pipeline cannot be executed (e.g. referenced pipeline does not exist)
			return updateProwJobState(c, key, newPipelineRun, pj, prowjobv1.ErrorState, jerr.Error())
		}
	}

	if p == nil {
		return fmt.Errorf("no pipelinerun found or created for %q, wantPipelineRun was %v", key, wantPipelineRun)
	}
	wantState, wantMsg := prowJobStatus(p.Status)
	return updateProwJobState(c, key, newPipelineRun, pj, wantState, wantMsg)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/approve/approvers/owners.go#L140-L155
func (o Owners) KeepCoveringApprovers(reverseMap map[string]sets.String, knownApprovers sets.String, potentialApprovers []string) sets.String {
	if len(potentialApprovers) == 0 {
		o.log.Debug("No potential approvers exist to filter for relevance. Does this repo have OWNERS files?")
	}
	keptApprovers := sets.NewString()

	unapproved := o.temporaryUnapprovedFiles(knownApprovers)

	for _, suggestedApprover := range o.GetSuggestedApprovers(reverseMap, potentialApprovers).List() {
		if reverseMap[suggestedApprover].Intersection(unapproved).Len() != 0 {
			keptApprovers.Insert(suggestedApprover)
		}
	}

	return keptApprovers
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/get_command.go#L44-L66
func getCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(c, ExitBadArgs, errors.New("key required"))
	}

	key := c.Args()[0]
	sorted := c.Bool("sort")
	quorum := c.Bool("quorum")

	ctx, cancel := contextWithTotalTimeout(c)
	resp, err := ki.Get(ctx, key, &client.GetOptions{Sort: sorted, Quorum: quorum})
	cancel()
	if err != nil {
		handleError(c, ExitServerError, err)
	}

	if resp.Node.Dir {
		fmt.Fprintln(os.Stderr, fmt.Sprintf("%s: is a directory", resp.Node.Key))
		os.Exit(1)
	}

	printResponseKey(resp, c.GlobalString("output"))
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pod-utils/gcs/upload.go#L98-L107
func DataUploadWithMetadata(src io.Reader, metadata map[string]string) UploadFunc {
	return func(obj *storage.ObjectHandle) error {
		writer := obj.NewWriter(context.Background())
		writer.Metadata = metadata
		_, copyErr := io.Copy(writer, src)
		closeErr := writer.Close()
		return errorutil.NewAggregate(copyErr, closeErr)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L415-L417
func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
	ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) })
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/branch_protection.go#L167-L175
func (bp BranchProtection) GetOrg(name string) *Org {
	o, ok := bp.Orgs[name]
	if ok {
		o.Policy = bp.Apply(o.Policy)
	} else {
		o.Policy = bp.Policy
	}
	return &o
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L820-L869
func (db *DB) handleFlushTask(ft flushTask) error {
	if !ft.mt.Empty() {
		// Store badger head even if vptr is zero, need it for readTs
		db.opt.Debugf("Storing value log head: %+v\n", ft.vptr)
		db.elog.Printf("Storing offset: %+v\n", ft.vptr)
		offset := make([]byte, vptrSize)
		ft.vptr.Encode(offset)

		// Pick the max commit ts, so in case of crash, our read ts would be higher than all the
		// commits.
		headTs := y.KeyWithTs(head, db.orc.nextTs())
		ft.mt.Put(headTs, y.ValueStruct{Value: offset})

		// Also store lfDiscardStats before flushing memtables
		discardStatsKey := y.KeyWithTs(lfDiscardStatsKey, 1)
		ft.mt.Put(discardStatsKey, y.ValueStruct{Value: db.vlog.encodedDiscardStats()})
	}

	fileID := db.lc.reserveFileID()
	fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
	if err != nil {
		return y.Wrap(err)
	}

	// Don't block just to sync the directory entry.
	dirSyncCh := make(chan error)
	go func() { dirSyncCh <- syncDir(db.opt.Dir) }()

	err = writeLevel0Table(ft, fd)
	dirSyncErr := <-dirSyncCh

	if err != nil {
		db.elog.Errorf("ERROR while writing to level 0: %v", err)
		return err
	}
	if dirSyncErr != nil {
		// Do dir sync as best effort. No need to return due to an error there.
		db.elog.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
	}

	tbl, err := table.OpenTable(fd, db.opt.TableLoadingMode, nil)
	if err != nil {
		db.elog.Printf("ERROR while opening table: %v", err)
		return err
	}
	// We own a ref on tbl.
	err = db.lc.addLevel0Table(tbl) // This will incrRef (if we don't error, sure)
	tbl.DecrRef()                   // Releases our ref.
	return err
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L403-L420
func (c *Client) doRequest(method, path string) (*http.Response, error) {
	req, err := http.NewRequest(method, path, nil)
	if err != nil {
		return nil, err
	}
	if c.authConfig != nil {
		if c.authConfig.Basic != nil {
			req.SetBasicAuth(c.authConfig.Basic.User, string(c.authConfig.Basic.GetToken()))
		}
		if c.authConfig.BearerToken != nil {
			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.authConfig.BearerToken.GetToken()))
		}
		if c.authConfig.CSRFProtect && c.authConfig.csrfRequestField != "" && c.authConfig.csrfToken != "" {
			req.Header.Set(c.authConfig.csrfRequestField, c.authConfig.csrfToken)
		}
	}
	return c.client.Do(req)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/boskos/client/client.go#L286-L298
func (c *Client) UpdateOne(name, state string, userData *common.UserData) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	r, err := c.storage.Get(name)
	if err != nil {
		return fmt.Errorf("no resource name %v", name)
	}
	if err := c.update(r.GetName(), state, userData); err != nil {
		return err
	}
	return c.updateLocalResource(r, state, userData)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L1030-L1047
func (m *member) Stop(t testing.TB) {
	lg.Info(
		"stopping a member",
		zap.String("name", m.Name),
		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
		zap.String("grpc-address", m.grpcAddr),
	)
	m.Close()
	m.serverClosers = nil
	lg.Info(
		"stopped a member",
		zap.String("name", m.Name),
		zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
		zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
		zap.String("grpc-address", m.grpcAddr),
	)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L138-L161
func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool {
	if x == nil {
		return true
	}
	v := iv.Compare(&x.iv.Ivl)
	switch {
	case v < 0:
		if !x.left.visit(iv, nv) {
			return false
		}
	case v > 0:
		maxiv := Interval{x.iv.Ivl.Begin, x.max}
		if maxiv.Compare(iv) == 0 {
			if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) {
				return false
			}
		}
	default:
		if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) {
			return false
		}
	}
	return true
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/fetcher/conversion.go#L133-L149
func newAssignees(issueID int, gAssignees []*github.User, repository string) ([]sql.Assignee, error) {
	assignees := []sql.Assignee{}
	repository = strings.ToLower(repository)

	for _, assignee := range gAssignees {
		// Guard against a nil assignee as well, since *assignee.Login is dereferenced below.
		if assignee == nil || assignee.Login == nil {
			return nil, fmt.Errorf("Assignee is missing Login field")
		}
		assignees = append(assignees, sql.Assignee{
			IssueID:    strconv.Itoa(issueID),
			Name:       *assignee.Login,
			Repository: repository,
		})
	}

	return assignees, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/get_command.go#L58-L75
func getCommandFunc(cmd *cobra.Command, args []string) {
	key, opts := getGetOp(args)
	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).Get(ctx, key, opts...)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	if printValueOnly {
		dp, simple := (display).(*simplePrinter)
		if !simple {
			ExitWithError(ExitBadArgs, fmt.Errorf("print-value-only is only for `--write-out=simple`"))
		}
		dp.valueOnly = true
	}
	display.Get(*resp)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/boskos/crds/resources_config_crd.go#L116-L121
func (in *ResourcesConfigObject) FromItem(i common.Item) {
	c, err := common.ItemToResourcesConfig(i)
	if err == nil {
		in.fromConfig(c)
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/respond.go#L36-L47
func FormatResponse(to, message, reason string) string {
	format := `@%s: %s

<details>

%s

%s
</details>`

	return fmt.Sprintf(format, to, message, reason, AboutThisBotWithoutCommands)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/member_command.go#L84-L96
func NewMemberListCommand() *cobra.Command {
	cc := &cobra.Command{
		Use:   "list",
		Short: "Lists all members in the cluster",
		Long: `When --write-out is set to simple, this command prints out comma-separated member lists for each endpoint.
The items in the lists are ID, Status, Name, Peer Addrs, Client Addrs.
`,
		Run: memberListCommandFunc,
	}

	return cc
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/check.go#L136-L266
func newCheckPerfCommand(cmd *cobra.Command, args []string) {
	var checkPerfAlias = map[string]string{
		"s": "s", "small": "s",
		"m": "m", "medium": "m",
		"l": "l", "large": "l",
		"xl": "xl", "xLarge": "xl",
	}

	model, ok := checkPerfAlias[checkPerfLoad]
	if !ok {
		ExitWithError(ExitBadFeature, fmt.Errorf("unknown load option %v", checkPerfLoad))
	}
	cfg := checkPerfCfgMap[model]

	requests := make(chan v3.Op, cfg.clients)
	limit := rate.NewLimiter(rate.Limit(cfg.limit), 1)

	cc := clientConfigFromCmd(cmd)
	clients := make([]*v3.Client, cfg.clients)
	for i := 0; i < cfg.clients; i++ {
		clients[i] = cc.mustClient()
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second)
	resp, err := clients[0].Get(ctx, checkPerfPrefix, v3.WithPrefix(), v3.WithLimit(1))
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	if len(resp.Kvs) > 0 {
		ExitWithError(ExitInvalidInput, fmt.Errorf("prefix %q has keys. Delete with etcdctl del --prefix %s first", checkPerfPrefix, checkPerfPrefix))
	}

	ksize, vsize := 256, 1024
	k, v := make([]byte, ksize), string(make([]byte, vsize))

	bar := pb.New(cfg.duration)
	bar.Format("Bom !")
	bar.Start()

	r := report.NewReport("%4.4f")
	var wg sync.WaitGroup
	wg.Add(len(clients))
	for i := range clients {
		go func(c *v3.Client) {
			defer wg.Done()
			for op := range requests {
				st := time.Now()
				_, derr := c.Do(context.Background(), op)
				r.Results() <- report.Result{Err: derr, Start: st, End: time.Now()}
			}
		}(clients[i])
	}

	go func() {
		cctx, ccancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second)
		defer ccancel()
		for limit.Wait(cctx) == nil {
			binary.PutVarint(k, rand.Int63n(math.MaxInt64))
			requests <- v3.OpPut(checkPerfPrefix+string(k), v)
		}
		close(requests)
	}()

	go func() {
		for i := 0; i < cfg.duration; i++ {
			time.Sleep(time.Second)
			bar.Add(1)
		}
		bar.Finish()
	}()

	sc := r.Stats()
	wg.Wait()
	close(r.Results())

	s := <-sc

	ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
	dresp, err := clients[0].Delete(ctx, checkPerfPrefix, v3.WithPrefix())
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	if autoCompact {
		compact(clients[0], dresp.Header.Revision)
	}

	if autoDefrag {
		for _, ep := range clients[0].Endpoints() {
			defrag(clients[0], ep)
		}
	}

	ok = true
	if len(s.ErrorDist) != 0 {
		fmt.Println("FAIL: too many errors")
		for k, v := range s.ErrorDist {
			fmt.Printf("FAIL: ERROR(%v) -> %d\n", k, v)
		}
		ok = false
	}

	if s.RPS/float64(cfg.limit) <= 0.9 {
		fmt.Printf("FAIL: Throughput too low: %d writes/s\n", int(s.RPS)+1)
		ok = false
	} else {
		fmt.Printf("PASS: Throughput is %d writes/s\n", int(s.RPS)+1)
	}
	if s.Slowest > 0.5 { // slowest request > 500ms
		fmt.Printf("Slowest request took too long: %fs\n", s.Slowest)
		ok = false
	} else {
		fmt.Printf("PASS: Slowest request took %fs\n", s.Slowest)
	}
	if s.Stddev > 0.1 { // stddev > 100ms
		fmt.Printf("Stddev too high: %fs\n", s.Stddev)
		ok = false
	} else {
		fmt.Printf("PASS: Stddev is %fs\n", s.Stddev)
	}

	if ok {
		fmt.Println("PASS")
	} else {
		fmt.Println("FAIL")
		os.Exit(ExitError)
	}
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/state.go#L156-L160
func (r *raftState) getLastIndex() uint64 {
	r.lastLock.Lock()
	defer r.lastLock.Unlock()
	return max(r.lastLogIndex, r.lastSnapshotIndex)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L442-L452
func (ivt *IntervalTree) Intersects(iv Interval) bool {
	x := ivt.root
	for x != nil && iv.Compare(&x.iv.Ivl) != 0 {
		if x.left != nil && x.left.max.Compare(iv.Begin) > 0 {
			x = x.left
		} else {
			x = x.right
		}
	}
	return x != nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/util.go#L56-L68
func generateUUID() string {
	buf := make([]byte, 16)
	if _, err := crand.Read(buf); err != nil {
		panic(fmt.Errorf("failed to read random bytes: %v", err))
	}

	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
		buf[0:4],
		buf[4:6],
		buf[6:8],
		buf[8:10],
		buf[10:16])
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/githuboauth/githuboauth.go#L227-L231
func (ga *Agent) serverError(w http.ResponseWriter, action string, err error) {
	ga.logger.WithError(err).Errorf("Error %s.", action)
	msg := fmt.Sprintf("500 Internal server error %s: %v", action, err)
	http.Error(w, msg, http.StatusInternalServerError)
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L175-L187
func NewNetworkTransport(
	stream StreamLayer,
	maxPool int,
	timeout time.Duration,
	logOutput io.Writer,
) *NetworkTransport {
	if logOutput == nil {
		logOutput = os.Stderr
	}
	logger := log.New(logOutput, "", log.LstdFlags)
	config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
	return NewNetworkTransportWithConfig(config)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L208-L223
func (pa *ConfigAgent) Load(path string) error {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	np := &Configuration{}
	if err := yaml.Unmarshal(b, np); err != nil {
		return err
	}

	if err := np.Validate(); err != nil {
		return err
	}

	pa.Set(np)
	return nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L204-L210
func (f *FakeClient) GetPullRequest(owner, repo string, number int) (*github.PullRequest, error) {
	val, exists := f.PullRequests[number]
	if !exists {
		return nil, fmt.Errorf("Pull request number %d does not exist", number)
	}
	return val, nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L681-L691
func (r *Raft) GetConfiguration() ConfigurationFuture {
	configReq := &configurationsFuture{}
	configReq.init()
	select {
	case <-r.shutdownCh:
		configReq.respond(ErrRaftShutdown)
		return configReq
	case r.configurationsCh <- configReq:
		return configReq
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/agent.go#L123-L144
func (ca *Agent) Set(c *Config) {
	ca.mut.Lock()
	defer ca.mut.Unlock()
	var oldConfig Config
	if ca.c != nil {
		oldConfig = *ca.c
	}
	delta := Delta{oldConfig, *c}
	ca.c = c
	for _, subscription := range ca.subscriptions {
		go func(sub DeltaChan) { // wait a minute to send each event
			end := time.NewTimer(time.Minute)
			select {
			case sub <- delta:
			case <-end.C:
			}
			if !end.Stop() { // prevent new events
				<-end.C // drain the pending event
			}
		}(subscription)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2auth/auth.go#L601-L638
func (rw RWPermission) Revoke(lg *zap.Logger, n RWPermission) (RWPermission, error) {
	var out RWPermission
	currentRead := types.NewUnsafeSet(rw.Read...)
	for _, r := range n.Read {
		if !currentRead.Contains(r) {
			if lg != nil {
				lg.Info(
					"revoking ungranted read permission",
					zap.String("read-permission", r),
				)
			} else {
				plog.Noticef("revoking ungranted read permission %s", r)
			}
			continue
		}
		currentRead.Remove(r)
	}
	currentWrite := types.NewUnsafeSet(rw.Write...)
	for _, w := range n.Write {
		if !currentWrite.Contains(w) {
			if lg != nil {
				lg.Info(
					"revoking ungranted write permission",
					zap.String("write-permission", w),
				)
			} else {
				plog.Noticef("revoking ungranted write permission %s", w)
			}
			continue
		}
		currentWrite.Remove(w)
	}
	out.Read = currentRead.Values()
	out.Write = currentWrite.Values()
	sort.Strings(out.Read)
	sort.Strings(out.Write)
	return out, nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/transform/plugins/comment_counter.go#L62-L75
func (c *CommentCounterPlugin) ReceiveComment(comment sql.Comment) []Point {
	points := []Point{}

	for _, matcher := range c.matcher {
		if matcher.MatchString(comment.Body) {
			points = append(points, Point{
				Values: map[string]interface{}{
					"comment": 1,
				},
				Date: comment.CommentCreatedAt,
			})
		}
	}

	return points
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L70-L76
func CreateSyncedFile(filename string, sync bool) (*os.File, error) {
	flags := os.O_RDWR | os.O_CREATE | os.O_EXCL
	if sync {
		flags |= datasyncFileFlag
	}
	return os.OpenFile(filename, flags, 0666)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/raft.go#L685-L734
func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			if lg != nil {
				lg.Panic("failed to marshal member", zap.Error(err))
			} else {
				plog.Panicf("marshal member should never fail: %v", err)
			}
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L329-L335
func (c *Client) GetSkipMetrics(path string) ([]byte, error) {
	resp, err := c.request(http.MethodGet, path, nil, false)
	if err != nil {
		return nil, err
	}
	return readResp(resp)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pjutil/tot.go#L80-L114
func GetBuildID(name, totURL string) (string, error) {
	if totURL == "" {
		return node.Generate().String(), nil
	}
	var err error
	url, err := url.Parse(totURL)
	if err != nil {
		return "", fmt.Errorf("invalid tot url: %v", err)
	}
	url.Path = path.Join(url.Path, "vend", name)
	sleepDuration := 100 * time.Millisecond
	for retries := 0; retries < 10; retries++ {
		if retries > 0 {
			sleep(sleepDuration)
			sleepDuration = sleepDuration * 2
		}
		var resp *http.Response
		resp, err = http.Get(url.String())
		if err != nil {
			continue
		}
		defer resp.Body.Close()
		if resp.StatusCode != 200 {
			err = fmt.Errorf("got unexpected response from tot: %v", resp.Status)
			continue
		}
		var buf []byte
		buf, err = ioutil.ReadAll(resp.Body)
		if err == nil {
			return string(buf), nil
		}
		return "", err
	}
	return "", err
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/label/label.go#L99-L116
func getLabelsFromGenericMatches(matches [][]string, additionalLabels []string) []string {
	if len(additionalLabels) == 0 {
		return nil
	}
	var labels []string
	for _, match := range matches {
		parts := strings.Split(match[0], " ")
		if ((parts[0] != "/label") && (parts[0] != "/remove-label")) || len(parts) != 2 {
			continue
		}
		for _, l := range additionalLabels {
			if l == parts[1] {
				labels = append(labels, parts[1])
			}
		}
	}
	return labels
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/gerrit/adapter/adapter.go#L122-L147
func (c *Controller) SaveLastSync(lastSync time.Time) error {
	if c.lastSyncFallback == "" {
		return nil
	}

	lastSyncUnix := strconv.FormatInt(lastSync.Unix(), 10)
	logrus.Infof("Writing last sync: %s", lastSyncUnix)

	tempFile, err := ioutil.TempFile(filepath.Dir(c.lastSyncFallback), "temp")
	if err != nil {
		return err
	}
	defer os.Remove(tempFile.Name())

	err = ioutil.WriteFile(tempFile.Name(), []byte(lastSyncUnix), 0644)
	if err != nil {
		return err
	}

	err = os.Rename(tempFile.Name(), c.lastSyncFallback)
	if err != nil {
		logrus.WithError(err).Info("Rename failed, fallback to copyfile")
		return copyFile(tempFile.Name(), c.lastSyncFallback)
	}
	return nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/worker/simple.go#L68-L72
func (w Simple) Stop() error {
	w.Logger.Info("Stopping Simple Background Worker")
	w.cancel()
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/util.go#L134-L141
func serverVersion(h http.Header) *semver.Version {
	verStr := h.Get("X-Server-Version")
	// backward compatibility with etcd 2.0
	if verStr == "" {
		verStr = "2.0.0"
	}
	return semver.Must(semver.NewVersion(verStr))
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/tide/history/history.go#L196-L205
func (h *History) AllRecords() map[string][]*Record {
	h.Lock()
	defer h.Unlock()

	res := make(map[string][]*Record, len(h.logs))
	for key, log := range h.logs {
		res[key] = log.toSlice()
	}
	return res
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/session.go#L95-L102
func (s *Session) Close() error {
	s.Orphan()
	// if revoke takes longer than the ttl, lease is expired anyway
	ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
	_, err := s.client.Revoke(ctx, s.id)
	cancel()
	return err
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/branch_protection.go#L148-L156
func (p Policy) Apply(child Policy) Policy {
	return Policy{
		Protect:                    selectBool(p.Protect, child.Protect),
		RequiredStatusChecks:       mergeContextPolicy(p.RequiredStatusChecks, child.RequiredStatusChecks),
		Admins:                     selectBool(p.Admins, child.Admins),
		Restrictions:               mergeRestrictions(p.Restrictions, child.Restrictions),
		RequiredPullRequestReviews: mergeReviewPolicy(p.RequiredPullRequestReviews, child.RequiredPullRequestReviews),
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/ep_command.go#L87-L149
func epHealthCommandFunc(cmd *cobra.Command, args []string) {
	flags.SetPflagsFromEnv("ETCDCTL", cmd.InheritedFlags())
	initDisplayFromCmd(cmd)

	sec := secureCfgFromCmd(cmd)
	dt := dialTimeoutFromCmd(cmd)
	ka := keepAliveTimeFromCmd(cmd)
	kat := keepAliveTimeoutFromCmd(cmd)
	auth := authCfgFromCmd(cmd)
	cfgs := []*v3.Config{}
	for _, ep := range endpointsFromCluster(cmd) {
		cfg, err := newClientCfg([]string{ep}, dt, ka, kat, sec, auth)
		if err != nil {
			ExitWithError(ExitBadArgs, err)
		}
		cfgs = append(cfgs, cfg)
	}

	var wg sync.WaitGroup
	hch := make(chan epHealth, len(cfgs))
	for _, cfg := range cfgs {
		wg.Add(1)
		go func(cfg *v3.Config) {
			defer wg.Done()
			ep := cfg.Endpoints[0]
			cli, err := v3.New(*cfg)
			if err != nil {
				hch <- epHealth{Ep: ep, Health: false, Error: err.Error()}
				return
			}
			st := time.Now()
			// get a random key. As long as we can get the response without an error, the
			// endpoint is healthy.
			ctx, cancel := commandCtx(cmd)
			_, err = cli.Get(ctx, "health")
			cancel()
			eh := epHealth{Ep: ep, Health: false, Took: time.Since(st).String()}
			// permission denied is OK since proposal goes through consensus to get it
			if err == nil || err == rpctypes.ErrPermissionDenied {
				eh.Health = true
			} else {
				eh.Error = err.Error()
			}
			hch <- eh
		}(cfg)
	}

	wg.Wait()
	close(hch)

	errs := false
	healthList := []epHealth{}
	for h := range hch {
		healthList = append(healthList, h)
		if h.Error != "" {
			errs = true
		}
	}
	display.EndpointHealth(healthList)
	if errs {
		ExitWithError(ExitError, fmt.Errorf("unhealthy cluster"))
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/robots/coverage/downloader/downloader.go#L41-L65
func listGcsObjects(ctx context.Context, client *storage.Client, bucketName, prefix, delim string) (
	[]string, error) {

	var objects []string
	it := client.Bucket(bucketName).Objects(ctx, &storage.Query{
		Prefix:    prefix,
		Delimiter: delim,
	})

	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return objects, fmt.Errorf("error iterating: %v", err)
		}

		if attrs.Prefix != "" {
			objects = append(objects, path.Base(attrs.Prefix))
		}
	}
	logrus.Info("end of listGcsObjects(...)")
	return objects, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/key.go#L50-L65
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
	for {
		resp, err := client.Get(ctx, pfx, getOpts...)
		if err != nil {
			return nil, err
		}
		if len(resp.Kvs) == 0 {
			return resp.Header, nil
		}
		lastKey := string(resp.Kvs[0].Key)
		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
			return nil, err
		}
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/set.go#L78-L84
func (us *unsafeSet) Equals(other Set) bool {
	v1 := sort.StringSlice(us.Values())
	v2 := sort.StringSlice(other.Values())
	v1.Sort()
	v2.Sort()
	return reflect.DeepEqual(v1, v2)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L101-L104
func RegisterPushEventHandler(name string, fn PushEventHandler, help HelpProvider) {
	pluginHelp[name] = help
	pushEventHandlers[name] = fn
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/middleware.go#L61-L64
func (ms *MiddlewareStack) Clear() {
	ms.stack = []MiddlewareFunc{}
	ms.skips = map[string]bool{}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/approve/approvers/owners.go#L174-L181
func (o Owners) GetOwnersSet() sets.String {
	owners := sets.NewString()
	for _, fn := range o.filenames {
		owners.Insert(o.repo.FindApproverOwnersForFile(fn))
	}
	o.removeSubdirs(owners)
	return owners
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/kube/dry_run_client.go#L70-L72
func (c *dryRunProwJobClient) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
	return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/replication.go#L456-L485
func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) {
	defer close(finishCh)
	respCh := p.Consumer()
	for {
		select {
		case ready := <-respCh:
			req, resp := ready.Request(), ready.Response()
			appendStats(string(s.peer.ID), ready.Start(), float32(len(req.Entries)))

			// Check for a newer term, stop running
			if resp.Term > req.Term {
				r.handleStaleTerm(s)
				return
			}

			// Update the last contact
			s.setLastContact()

			// Abort pipeline if not successful
			if !resp.Success {
				return
			}

			// Update our replication state
			updateLastAppended(s, req)
		case <-stopCh:
			return
		}
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/tide.go#L189-L219
func (tq *TideQuery) Query() string {
	toks := []string{"is:pr", "state:open"}
	for _, o := range tq.Orgs {
		toks = append(toks, fmt.Sprintf("org:\"%s\"", o))
	}
	for _, r := range tq.Repos {
		toks = append(toks, fmt.Sprintf("repo:\"%s\"", r))
	}
	for _, r := range tq.ExcludedRepos {
		toks = append(toks, fmt.Sprintf("-repo:\"%s\"", r))
	}
	for _, b := range tq.ExcludedBranches {
		toks = append(toks, fmt.Sprintf("-base:\"%s\"", b))
	}
	for _, b := range tq.IncludedBranches {
		toks = append(toks, fmt.Sprintf("base:\"%s\"", b))
	}
	for _, l := range tq.Labels {
		toks = append(toks, fmt.Sprintf("label:\"%s\"", l))
	}
	for _, l := range tq.MissingLabels {
		toks = append(toks, fmt.Sprintf("-label:\"%s\"", l))
	}
	if tq.Milestone != "" {
		toks = append(toks, fmt.Sprintf("milestone:\"%s\"", tq.Milestone))
	}
	if tq.ReviewApprovedRequired {
		toks = append(toks, "review:approved")
	}
	return strings.Join(toks, " ")
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/lease_command.go#L56-L73
func leaseGrantCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("lease grant command needs TTL argument"))
	}

	ttl, err := strconv.ParseInt(args[0], 10, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad TTL (%v)", err))
	}

	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).Grant(ctx, ttl)
	cancel()
	if err != nil {
		ExitWithError(ExitError, fmt.Errorf("failed to grant lease (%v)", err))
	}
	display.Grant(*resp)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L389-L416
func EventsForPlugin(name string) []string {
	var events []string
	if _, ok := issueHandlers[name]; ok {
		events = append(events, "issue")
	}
	if _, ok := issueCommentHandlers[name]; ok {
		events = append(events, "issue_comment")
	}
	if _, ok := pullRequestHandlers[name]; ok {
		events = append(events, "pull_request")
	}
	if _, ok := pushEventHandlers[name]; ok {
		events = append(events, "push")
	}
	if _, ok := reviewEventHandlers[name]; ok {
		events = append(events, "pull_request_review")
	}
	if _, ok := reviewCommentEventHandlers[name]; ok {
		events = append(events, "pull_request_review_comment")
	}
	if _, ok := statusEventHandlers[name]; ok {
		events = append(events, "status")
	}
	if _, ok := genericCommentHandlers[name]; ok {
		events = append(events, "GenericCommentEvent (any event for user text)")
	}
	return events
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/peribolos/main.go#L904-L951
func configureTeamRepos(client teamRepoClient, githubTeams map[string]github.Team, name, orgName string, team org.Team) error {
	gt, ok := githubTeams[name]
	if !ok { // configureTeams is buggy if this is the case
		return fmt.Errorf("%s not found in id list", name)
	}

	want := team.Repos
	have := map[string]github.RepoPermissionLevel{}
	repos, err := client.ListTeamRepos(gt.ID)
	if err != nil {
		return fmt.Errorf("failed to list team %d(%s) repos: %v", gt.ID, name, err)
	}
	for _, repo := range repos {
		have[repo.Name] = github.LevelFromPermissions(repo.Permissions)
	}

	actions := map[string]github.RepoPermissionLevel{}
	for wantRepo, wantPermission := range want {
		if havePermission, haveRepo := have[wantRepo]; haveRepo && havePermission == wantPermission {
			// nothing to do
			continue
		}
		// create or update this permission
		actions[wantRepo] = wantPermission
	}

	for haveRepo := range have {
		if _, wantRepo := want[haveRepo]; !wantRepo {
			// should remove these permissions
			actions[haveRepo] = github.None
		}
	}

	var updateErrors []error
	for repo, permission := range actions {
		var err error
		if permission == github.None {
			err = client.RemoveTeamRepo(gt.ID, orgName, repo)
		} else {
			err = client.UpdateTeamRepo(gt.ID, orgName, repo, permission)
		}
		if err != nil {
			updateErrors = append(updateErrors, fmt.Errorf("failed to update team %d(%s) permissions on repo %s to %s: %v", gt.ID, name, repo, permission, err))
		}
	}

	return errorutil.NewAggregate(updateErrors...)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/types/urlsmap.go#L71-L80
func (c URLsMap) URLs() []string {
	var urls []string
	for _, us := range c {
		for _, u := range us {
			urls = append(urls, u.String())
		}
	}
	sort.Strings(urls)
	return urls
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/pipeline/controller.go#L561-L587
func makePipelineGitResource(pj prowjobv1.ProwJob) *pipelinev1alpha1.PipelineResource {
	var revision string
	if pj.Spec.Refs != nil {
		if len(pj.Spec.Refs.Pulls) > 0 {
			revision = pj.Spec.Refs.Pulls[0].SHA
		} else {
			revision = pj.Spec.Refs.BaseSHA
		}
	}

	pr := pipelinev1alpha1.PipelineResource{
		ObjectMeta: pipelineMeta(pj),
		Spec: pipelinev1alpha1.PipelineResourceSpec{
			Type: pipelinev1alpha1.PipelineResourceTypeGit,
			Params: []pipelinev1alpha1.Param{
				{
					Name:  "url",
					Value: sourceURL(pj),
				},
				{
					Name:  "revision",
					Value: revision,
				},
			},
		},
	}
	return &pr
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L598-L601
func (c *Client) Build(pj *prowapi.ProwJob, buildID string) error {
	c.logger.WithFields(pjutil.ProwJobFields(pj)).Info("Build")
	return c.BuildFromSpec(&pj.Spec, buildID, pj.ObjectMeta.Name)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/kube/client.go#L557-L567
func (c *Client) GetLogTail(pod, container string, n int64) ([]byte, error) {
	c.log("GetLogTail", pod, n)
	return c.requestRetry(&request{
		path: fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/log", c.namespace, pod),
		query: map[string]string{
			// Because we want last n bytes, we fetch all lines and then limit to n bytes
			"tailLines":  "-1",
			"container":  container,
			"limitBytes": strconv.FormatInt(n, 10),
		},
	})
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/route_mappings.go#L59-L63
func (a *App) Redirect(status int, from, to string) *RouteInfo {
	return a.GET(from, func(c Context) error {
		return c.Redirect(status, to)
	})
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/approve/approvers/owners.go#L584-L592
func GenerateTemplate(templ, name string, data interface{}) (string, error) {
	buf := bytes.NewBufferString("")
	if messageTempl, err := template.New(name).Parse(templ); err != nil {
		return "", fmt.Errorf("failed to parse template for %s: %v", name, err)
	} else if err := messageTempl.Execute(buf, data); err != nil {
		return "", fmt.Errorf("failed to execute template for %s: %v", name, err)
	}
	return buf.String(), nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/tide/blockers/blockers.go#L61-L70
func (b Blockers) GetApplicable(org, repo, branch string) []Blocker {
	var res []Blocker
	res = append(res, b.Repo[orgRepo{org: org, repo: repo}]...)
	res = append(res, b.Branch[orgRepoBranch{org: org, repo: repo, branch: branch}]...)

	sort.Slice(res, func(i, j int) bool {
		return res[i].Number < res[j].Number
	})
	return res
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/label_sync/main.go#L408-L411
func change(repo string, label Label) Update {
	logrus.WithField("repo", repo).WithField("label", label.Name).WithField("color", label.Color).Info("change")
	return Update{Why: "change", Current: &label, Wanted: &label, repo: repo}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L236-L240
func (pa *ConfigAgent) Set(pc *Configuration) {
	pa.mut.Lock()
	defer pa.mut.Unlock()
	pa.configuration = pc
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/lease_command.go#L172-L199
func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("lease keep-alive command needs lease ID as argument"))
	}

	id := leaseFromArgs(args[0])

	if leaseKeepAliveOnce {
		respc, kerr := mustClientFromCmd(cmd).KeepAliveOnce(context.TODO(), id)
		if kerr != nil {
			ExitWithError(ExitBadConnection, kerr)
		}
		display.KeepAlive(*respc)
		return
	}

	respc, kerr := mustClientFromCmd(cmd).KeepAlive(context.TODO(), id)
	if kerr != nil {
		ExitWithError(ExitBadConnection, kerr)
	}
	for resp := range respc {
		display.KeepAlive(*resp)
	}

	if _, ok := (display).(*simplePrinter); ok {
		fmt.Printf("lease %016x expired or revoked.\n", id)
	}
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/gcsupload/run.go#L117-L131
func PathsForJob(options *prowapi.GCSConfiguration, spec *downwardapi.JobSpec, subdir string) (string, string, gcs.RepoPathBuilder) {
	builder := builderForStrategy(options.PathStrategy, options.DefaultOrg, options.DefaultRepo)
	jobBasePath := gcs.PathForSpec(spec, builder)
	if options.PathPrefix != "" {
		jobBasePath = path.Join(options.PathPrefix, jobBasePath)
	}
	var gcsPath string
	if subdir == "" {
		gcsPath = jobBasePath
	} else {
		gcsPath = path.Join(jobBasePath, subdir)
	}

	return jobBasePath, gcsPath, builder
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/approve/approvers/owners.go#L421-L429
func (ap Approvers) UnapprovedFiles() sets.String {
	unapproved := sets.NewString()
	for fn, approvers := range ap.GetFilesApprovers() {
		if len(approvers) == 0 {
			unapproved.Insert(fn)
		}
	}
	return unapproved
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/util.go#L59-L68
func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
	switch msgt {
	case pb.MsgVote:
		return pb.MsgVoteResp
	case pb.MsgPreVote:
		return pb.MsgPreVoteResp
	default:
		panic(fmt.Sprintf("not a vote message: %s", msgt))
	}
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_transport.go#L213-L223
func (i *InmemTransport) DisconnectAll() {
	i.Lock()
	defer i.Unlock()
	i.peers = make(map[ServerAddress]*InmemTransport)

	// Handle pipelines
	for _, pipeline := range i.pipelines {
		pipeline.Close()
	}
	i.pipelines = nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/wal.go#L743-L771
func (w *WAL) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()

	if w.fp != nil {
		w.fp.Close()
		w.fp = nil
	}

	if w.tail() != nil {
		if err := w.sync(); err != nil {
			return err
		}
	}
	for _, l := range w.locks {
		if l == nil {
			continue
		}
		if err := l.Close(); err != nil {
			if w.lg != nil {
				w.lg.Warn("failed to close WAL", zap.Error(err))
			} else {
				plog.Errorf("failed to unlock during closing wal: %s", err)
			}
		}
	}

	return w.dirFile.Close()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/backend/batch_tx.go#L129-L142
func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
	bucket := t.tx.Bucket(bucketName)
	if bucket == nil {
		if t.backend.lg != nil {
			t.backend.lg.Fatal(
				"failed to find a bucket",
				zap.String("bucket-name", string(bucketName)),
			)
		} else {
			plog.Fatalf("bucket %s does not exist", bucketName)
		}
	}
	return unsafeRange(bucket.Cursor(), key, endKey, limit)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/etcdhttp/metrics.go#L48-L64
func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodGet {
			w.Header().Set("Allow", http.MethodGet)
			http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
			return
		}
		h := hfunc()
		d, _ := json.Marshal(h)
		if h.Health != "true" {
			http.Error(w, string(d), http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
		w.Write(d)
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/client/util.go#L48-L53
func IsUserNotFound(err error) bool {
	if ae, ok := err.(authError); ok {
		return userNotFoundRegExp.MatchString(ae.Message)
	}
	return false
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/newapp/web/options.go#L19-L51
func (opts *Options) Validate() error {
	if opts.Options == nil {
		opts.Options = &core.Options{}
	}

	if err := opts.Options.Validate(); err != nil {
		return err
	}

	if opts.Docker != nil {
		if opts.Docker.App.IsZero() {
			opts.Docker.App = opts.App
		}
		if err := opts.Docker.Validate(); err != nil {
			return err
		}
	}

	if opts.Webpack != nil {
		if opts.Webpack.App.IsZero() {
			opts.Webpack.App = opts.App
		}
		if err := opts.Webpack.Validate(); err != nil {
			return err
		}
	}

	if opts.Standard != nil && opts.Webpack != nil {
		return errors.New("you can not use both webpack and standard generators")
	}

	return nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/flagutil/github.go#L114-L116
func (o *GitHubOptions) GitHubClient(secretAgent *secret.Agent, dryRun bool) (client *github.Client, err error) {
	return o.GitHubClientWithLogFields(secretAgent, dryRun, logrus.Fields{})
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/approve/approvers/owners.go#L64-L72
func (o Owners) GetApprovers() map[string]sets.String {
	ownersToApprovers := map[string]sets.String{}

	for fn := range o.GetOwnersSet() {
		ownersToApprovers[fn] = o.repo.Approvers(fn)
	}

	return ownersToApprovers
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L871-L873
func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
	s.r.ReportSnapshot(id, status)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/adt/interval_tree.go#L104-L114
func (x *intervalNode) successor() *intervalNode {
	if x.right != nil {
		return x.right.min()
	}
	y := x.parent
	for y != nil && x == y.right {
		x = y
		y = y.parent
	}
	return y
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/client/curl.go#L41-L70
func printcURL(req *http.Request) error {
	if !cURLDebug {
		return nil
	}
	var (
		command string
		b       []byte
		err     error
	)

	if req.URL != nil {
		command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
	}

	if req.Body != nil {
		b, err = ioutil.ReadAll(req.Body)
		if err != nil {
			return err
		}
		command += fmt.Sprintf(" -d %q", string(b))
	}

	fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)

	// reset body
	body := bytes.NewBuffer(b)
	req.Body = ioutil.NopCloser(body)

	return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/configuration.go#L158-L185
func checkConfiguration(configuration Configuration) error {
	idSet := make(map[ServerID]bool)
	addressSet := make(map[ServerAddress]bool)
	var voters int
	for _, server := range configuration.Servers {
		if server.ID == "" {
			return fmt.Errorf("Empty ID in configuration: %v", configuration)
		}
		if server.Address == "" {
			return fmt.Errorf("Empty address in configuration: %v", server)
		}
		if idSet[server.ID] {
			return fmt.Errorf("Found duplicate ID in configuration: %v", server.ID)
		}
		idSet[server.ID] = true
		if addressSet[server.Address] {
			return fmt.Errorf("Found duplicate address in configuration: %v", server.Address)
		}
		addressSet[server.Address] = true
		if server.Suffrage == Voter {
			voters++
		}
	}
	if voters == 0 {
		return fmt.Errorf("Need at least one voter in configuration: %v", configuration)
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/rm_command.go#L25-L41
func NewRemoveCommand() cli.Command {
	return cli.Command{
		Name:      "rm",
		Usage:     "remove a key or a directory",
		ArgsUsage: "<key>",
		Flags: []cli.Flag{
			cli.BoolFlag{Name: "dir", Usage: "removes the key if it is an empty directory or a key-value pair"},
			cli.BoolFlag{Name: "recursive, r", Usage: "removes the key and all child keys(if it is a directory)"},
			cli.StringFlag{Name: "with-value", Value: "", Usage: "previous value"},
			cli.IntFlag{Name: "with-index", Value: 0, Usage: "previous index"},
		},
		Action: func(c *cli.Context) error {
			rmCommandFunc(c, mustNewKeyAPI(c))
			return nil
		},
	}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/js.go#L23-L33
func (e *Engine) JavaScript(names ...string) Renderer {
	if e.JavaScriptLayout != "" && len(names) == 1 {
		names = append(names, e.JavaScriptLayout)
	}

	hr := &templateRenderer{
		Engine:      e,
		contentType: "application/javascript",
		names:       names,
	}
	return hr
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/initupload/options.go#L59-L62
func (o *Options) AddFlags(flags *flag.FlagSet) {
	flags.StringVar(&o.Log, "clone-log", "", "Path to the clone records log")
	o.Options.AddFlags(flags)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/util.go#L65-L88
func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
	var longest types.ID
	var oldest time.Time
	for _, id := range membs {
		tm := tp.ActiveSince(id)
		if tm.IsZero() { // inactive
			continue
		}

		if oldest.IsZero() { // first longest candidate
			oldest = tm
			longest = id
		}

		if tm.Before(oldest) {
			oldest = tm
			longest = id
		}
	}
	if uint64(longest) == 0 {
		return longest, false
	}
	return longest, true
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/util.go#L145-L154
func defrag(c *v3.Client, ep string) {
	fmt.Printf("Defragmenting %q\n", ep)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	_, err := c.Defragment(ctx, ep)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	fmt.Printf("Defragmented %q\n", ep)
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/dir_windows.go#L103-L106
func (g *directoryLockGuard) release() error {
	g.path = ""
	return syscall.CloseHandle(g.h)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/experiment/resultstore/main.go#L487-L526
func insertLink(started *gcs.Started, viewURL string) (bool, error) {
	if started.Metadata == nil {
		started.Metadata = metadata.Metadata{}
	}
	meta := started.Metadata
	var changed bool
	top, present := meta.String(resultstoreKey)
	if !present || top == nil || *top != viewURL {
		changed = true
		meta[resultstoreKey] = viewURL
	}

	links, present := meta.Meta(linksKey)
	if present && links == nil {
		return false, fmt.Errorf("metadata.links is not a Metadata value: %v", meta[linksKey])
	}
	if links == nil {
		links = &metadata.Metadata{}
		changed = true
	}

	resultstoreMeta, present := links.Meta(resultstoreKey)
	if present && resultstoreMeta == nil {
		return false, fmt.Errorf("metadata.links.resultstore is not a Metadata value: %v", (*links)[resultstoreKey])
	}
	if resultstoreMeta == nil {
		resultstoreMeta = &metadata.Metadata{}
		changed = true
	}

	val, present := resultstoreMeta.String(urlKey)
	if present && val == nil {
		return false, fmt.Errorf("metadata.links.resultstore.url is not a string value: %v", (*resultstoreMeta)[urlKey])
	}

	if !changed && val != nil && *val == viewURL {
		return false, nil
	}
	(*resultstoreMeta)[urlKey] = viewURL
	(*links)[resultstoreKey] = *resultstoreMeta
	meta[linksKey] = *links
	return true, nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L226-L230
func (pa *ConfigAgent) Config() *Configuration {
	pa.mut.Lock()
	defer pa.mut.Unlock()
	return pa.configuration
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/naming/grpc.go#L71-L109
func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
	if gw.wch == nil {
		// first Next() returns all addresses
		return gw.firstNext()
	}
	if gw.err != nil {
		return nil, gw.err
	}

	// process new events on target/*
	wr, ok := <-gw.wch
	if !ok {
		gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())
		return nil, gw.err
	}
	if gw.err = wr.Err(); gw.err != nil {
		return nil, gw.err
	}

	updates := make([]*naming.Update, 0, len(wr.Events))
	for _, e := range wr.Events {
		var jupdate naming.Update
		var err error
		switch e.Type {
		case etcd.EventTypePut:
			err = json.Unmarshal(e.Kv.Value, &jupdate)
			jupdate.Op = naming.Add
		case etcd.EventTypeDelete:
			err = json.Unmarshal(e.PrevKv.Value, &jupdate)
			jupdate.Op = naming.Delete
		default:
			continue
		}
		if err == nil {
			updates = append(updates, &jupdate)
		}
	}
	return updates, nil
}