| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
| 
	def pad_to_len(
    arr: torch.Tensor,
    target_len: int,
    *,
    left_pad: bool,
    eos_token: int,
    device: torch.device,
) -> torch.Tensor:
  """Pad or truncate array to given length."""
  if arr.shape[1] < target_len:
    shape_for_ones = list(arr.shape)
    shape_for_ones[1] = target_len - shape_for_ones[1]
    padded = (
        torch.ones(
            shape_for_ones,
            device=device,
            dtype=torch.long,
        )
        * eos_token
    )
    if not left_pad:
      return torch.concatenate((arr, padded), dim=1)
    else:
      return torch.concatenate((padded, arr), dim=1)
  else:
    return arr[:, :target_len] | 
	Pad or truncate array to given length. | 
	pad_to_len | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
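A minimal usage sketch of pad_to_len (all values hypothetical): a sequence shorter than target_len is padded with the EOS token on the requested side, and a longer one is truncated.

import torch

arr = torch.tensor([[1, 2, 3]])  # shape [1, 3]
# Right-pad to length 5 with a hypothetical EOS id of 0.
padded = pad_to_len(arr, 5, left_pad=False, eos_token=0, device=torch.device("cpu"))
# padded == tensor([[1, 2, 3, 0, 0]])
# Truncate to length 2 instead.
truncated = pad_to_len(arr, 2, left_pad=False, eos_token=0, device=torch.device("cpu"))
# truncated == tensor([[1, 2]])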
| 
	def filter_and_truncate(
    outputs: torch.Tensor,
    truncation_length: Optional[int],
    eos_token_mask: torch.Tensor,
) -> torch.Tensor:
  """Filter and truncate outputs to given length.
  Args:
   outputs: output tensor of shape [batch_size, output_len]
   truncation_length: Length to truncate the final output. If None, then no
     truncation is applied.
   eos_token_mask: EOS token mask of shape [batch_size, output_len]
  Returns:
   output tensor of shape [batch_size, truncation_length].
  """
  if truncation_length:
    outputs = outputs[:, :truncation_length]
    truncation_mask = torch.sum(eos_token_mask, dim=1) >= truncation_length
    return outputs[truncation_mask, :]
  return outputs | 
	Filter and truncate outputs to given length.
  Args:
   outputs: output tensor of shape [batch_size, output_len]
   truncation_length: Length to truncate the final output. If None, then no
     truncation is applied.
   eos_token_mask: EOS token mask of shape [batch_size, output_len]
  Returns:
   output tensor of shape [batch_size, truncation_length].
   | 
	filter_and_truncate | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
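A small illustration of the filtering behaviour (hypothetical EOS id 0): rows with fewer than truncation_length unmasked tokens are dropped, and the remaining rows are truncated.

import torch

outputs = torch.tensor([[5, 6, 7, 0], [8, 0, 0, 0]])
# 1 up to the first EOS token, as compute_eos_token_mask would produce.
eos_token_mask = torch.tensor([[1, 1, 1, 0], [1, 0, 0, 0]])
kept = filter_and_truncate(outputs, 3, eos_token_mask)
# Only the first row has at least 3 unmasked tokens, so
# kept == tensor([[5, 6, 7]]) with shape [1, 3].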
| 
	def process_outputs_for_training(
    all_outputs: Sequence[torch.Tensor],
    logits_processor: logits_processing.SynthIDLogitsProcessor,
    tokenizer: Any,
    *,
    pos_truncation_length: Optional[int],
    neg_truncation_length: Optional[int],
    max_length: int,
    is_cv: bool,
    is_pos: bool,
    torch_device: torch.device,
) -> tuple[Sequence[torch.Tensor], Sequence[torch.Tensor]]:
  """Process raw model outputs into format understandable by the detector.
  Args:
   all_outputs: sequence of outputs of shape [batch_size, output_len].
   logits_processor: logits processor used for watermarking.
   tokenizer: tokenizer used for the model.
   pos_truncation_length: Length to truncate the watermarked outputs. If None,
     then no truncation is applied.
   neg_truncation_length: Length to truncate the unwatermarked outputs. If None,
     then no truncation is applied.
    max_length: Length to pad truncated outputs so that all processed entries
      have the same shape.
   is_cv: Process given outputs for cross validation.
   is_pos: Process given outputs for positives.
   torch_device: torch device to use.
  Returns:
    Tuple of
      all_masks: list of masks of shape [batch_size, max_length].
      all_g_values: list of g_values of shape [batch_size, max_length, depth].
  """
  all_masks = []
  all_g_values = []
  for outputs in tqdm.tqdm(all_outputs):
    # outputs is of shape [batch_size, output_len].
    # output_len can differ from batch to batch.
    eos_token_mask = logits_processor.compute_eos_token_mask(
        input_ids=outputs,
        eos_token_id=tokenizer.eos_token_id,
    )
    if is_pos or is_cv:
      # filter with length for positives for both train and CV.
      # We also filter for length when CV negatives are processed.
      outputs = filter_and_truncate(
          outputs, pos_truncation_length, eos_token_mask
      )
    elif not is_pos and not is_cv:
      outputs = filter_and_truncate(
          outputs, neg_truncation_length, eos_token_mask
      )
    # If there are no filtered outputs, skip this batch.
    if outputs.shape[0] == 0:
      continue
    # All outputs are padded to max-length with eos-tokens.
    outputs = pad_to_len(
        outputs,
        max_length,
        left_pad=False,
        eos_token=tokenizer.eos_token_id,
        device=torch_device,
    )
    # outputs shape [num_filtered_entries, max_length]
    eos_token_mask = logits_processor.compute_eos_token_mask(
        input_ids=outputs,
        eos_token_id=tokenizer.eos_token_id,
    )
    context_repetition_mask = logits_processor.compute_context_repetition_mask(
        input_ids=outputs,
    )
    # context_repetition_mask of shape [num_filtered_entries, max_length -
    # (ngram_len - 1)].
    context_repetition_mask = pad_to_len(
        context_repetition_mask,
        max_length,
        left_pad=True,
        eos_token=0,
        device=torch_device,
    )
    # We pad on left to get same max_length shape.
    # context_repetition_mask of shape [num_filtered_entries, max_length].
    combined_mask = context_repetition_mask * eos_token_mask
    g_values = logits_processor.compute_g_values(
        input_ids=outputs,
    )
    # g_values of shape [num_filtered_entries, max_length - (ngram_len - 1),
    # depth].
    g_values = pad_to_len(
        g_values, max_length, left_pad=True, eos_token=0, device=torch_device
    )
    # We pad on left to get same max_length shape.
    # g_values of shape [num_filtered_entries, max_length, depth].
    all_masks.append(combined_mask)
    all_g_values.append(g_values)
  return all_masks, all_g_values | 
	Process raw model outputs into format understandable by the detector.
  Args:
   all_outputs: sequence of outputs of shape [batch_size, output_len].
   logits_processor: logits processor used for watermarking.
   tokenizer: tokenizer used for the model.
   pos_truncation_length: Length to truncate the watermarked outputs. If None,
     then no truncation is applied.
   neg_truncation_length: Length to truncate the unwatermarked outputs. If None,
     then no truncation is applied.
    max_length: Length to pad truncated outputs so that all processed entries
      have the same shape.
   is_cv: Process given outputs for cross validation.
   is_pos: Process given outputs for positives.
   torch_device: torch device to use.
  Returns:
    Tuple of
      all_masks: list of masks of shape [batch_size, max_length].
      all_g_values: list of g_values of shape [batch_size, max_length, depth].
   | 
	process_outputs_for_training | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def __call__(self, g_values: jnp.ndarray) -> jnp.ndarray:
    """Computes likelihoods given g-values and a mask.
    Args:
      g_values: g-values (all are 0 or 1) of shape [batch_size, seq_len,
        watermarking_depth, ...].
    Returns:
      an array of shape [batch_size, seq_len, watermarking_depth] or
      [batch_size, seq_len, 1] corresponding to the likelihoods
      of the g-values given either the watermarked hypothesis or
      the unwatermarked hypothesis; i.e. either P(g|watermarked)
      or P(g|unwatermarked).
    """ | 
	Computes likelihoods given g-values.
    Args:
      g_values: g-values (all are 0 or 1) of shape [batch_size, seq_len,
        watermarking_depth, ...].
    Returns:
      an array of shape [batch_size, seq_len, watermarking_depth] or
      [batch_size, seq_len, 1] corresponding to the likelihoods
      of the g-values given either the watermarked hypothesis or
      the unwatermarked hypothesis; i.e. either P(g|watermarked)
      or P(g|unwatermarked).
     | 
	__call__ | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def _compute_latents(
      self, g_values: jnp.ndarray
  ) -> tuple[jnp.ndarray, jnp.ndarray]:
    """Computes the unique token probability distribution given g-values.
    Args:
      g_values: Pseudorandom function values of shape [batch_size, seq_len,
        watermarking_depth].
    Returns:
      p_one_unique_token and p_two_unique_tokens, both of shape
        [batch_size, seq_len, watermarking_depth]. p_one_unique_token[i,t,l]
        gives the probability of there being one unique token in a tournament
        match on layer l, on timestep t, for batch item i.
        p_one_unique_token[i,t,l] + p_two_unique_tokens[i,t,l] = 1.
    """
    # Tile g-values to produce feature vectors for predicting the latents
    # for each layer in the tournament; our model for the latents psi is a
    # logistic regression model psi = sigmoid(delta * x + beta).
    x = jnp.repeat(
        jnp.expand_dims(g_values, axis=-2), self.watermarking_depth, axis=-2
    )  # [batch_size, seq_len, watermarking_depth, watermarking_depth]
    x = jnp.tril(
        x, k=-1
    )  # mask all elements above -1 diagonal for autoregressive factorization
    logits = (
        jnp.einsum("ijkl,ijkl->ijk", self.delta, x) + self.beta
    )  # [batch_size, seq_len, watermarking_depth]
    p_two_unique_tokens = jax.nn.sigmoid(logits)
    p_one_unique_token = 1 - p_two_unique_tokens
    return p_one_unique_token, p_two_unique_tokens | 
	Computes the unique token probability distribution given g-values.
    Args:
      g_values: Pseudorandom function values of shape [batch_size, seq_len,
        watermarking_depth].
    Returns:
      p_one_unique_token and p_two_unique_tokens, both of shape
        [batch_size, seq_len, watermarking_depth]. p_one_unique_token[i,t,l]
        gives the probability of there being one unique token in a tournament
        match on layer l, on timestep t, for batch item i.
        p_one_unique_token[i,t,l] + p_two_unique_tokens[i,t,l] = 1.
     | 
	_compute_latents | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def __call__(self, g_values: jnp.ndarray) -> jnp.ndarray:
    """Computes the likelihoods P(g_values|watermarked).
    Args:
      g_values: g-values (values 0 or 1) of shape [batch_size, seq_len,
        watermarking_depth]
    Returns:
      p(g_values|watermarked) of shape [batch_size, seq_len,
      watermarking_depth].
    """
    p_one_unique_token, p_two_unique_tokens = self._compute_latents(g_values)
    # P(g_tl | watermarked) is equal to
    # 0.5 * [ (g_tl+0.5) * p_two_unique_tokens + p_one_unique_token].
    return 0.5 * ((g_values + 0.5) * p_two_unique_tokens + p_one_unique_token) | 
	Computes the likelihoods P(g_values|watermarked).
    Args:
      g_values: g-values (values 0 or 1) of shape [batch_size, seq_len,
        watermarking_depth]
    Returns:
      p(g_values|watermarked) of shape [batch_size, seq_len,
      watermarking_depth].
     | 
	__call__ | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
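As a sanity check on the formula above, the two g-value outcomes always sum to one for any latent value; a quick sketch with a made-up p_two_unique_tokens:

import jax.numpy as jnp

p_two = jnp.array(0.7)  # hypothetical P(two unique tokens in a match)
p_one = 1.0 - p_two
lik_g1 = 0.5 * ((1 + 0.5) * p_two + p_one)  # P(g=1|watermarked) = 0.675
lik_g0 = 0.5 * ((0 + 0.5) * p_two + p_one)  # P(g=0|watermarked) = 0.325
assert jnp.isclose(lik_g0 + lik_g1, 1.0)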
| 
	def _compute_posterior(
    likelihoods_watermarked: jnp.ndarray,
    likelihoods_unwatermarked: jnp.ndarray,
    mask: jnp.ndarray,
    prior: float,
) -> jnp.ndarray:
  """Compute posterior P(w|g) given likelihoods, mask and prior.
  Args:
    likelihoods_watermarked: shape [batch, length, depth]. Likelihoods
      P(g_values|watermarked) of g-values under watermarked model.
    likelihoods_unwatermarked: shape [batch, length, depth]. Likelihoods
      P(g_values|unwatermarked) of g-values under unwatermarked model.
    mask: A binary array shape [batch, length] indicating which g-values should
      be used. g-values with mask value 0 are discarded.
    prior: Prior probability P(w) that the text is watermarked.
  Returns:
    Posterior probability P(watermarked|g_values), shape [batch].
  """
  mask = jnp.expand_dims(mask, -1)
  prior = jnp.clip(prior, a_min=1e-5, a_max=1 - 1e-5)
  log_likelihoods_watermarked = jnp.log(
      jnp.clip(likelihoods_watermarked, a_min=1e-30, a_max=float("inf"))
  )
  log_likelihoods_unwatermarked = jnp.log(
      jnp.clip(likelihoods_unwatermarked, a_min=1e-30, a_max=float("inf"))
  )
  log_odds = log_likelihoods_watermarked - log_likelihoods_unwatermarked
  # Sum relative surprisals (log odds) across all token positions and layers.
  relative_surprisal_likelihood = jnp.einsum(
      "i...->i", log_odds * mask
  )  # [batch_size].
  relative_surprisal_prior = jnp.log(prior) - jnp.log(1 - prior)
  # Combine prior and likelihood.
  relative_surprisal = (
      relative_surprisal_prior + relative_surprisal_likelihood
  )  # [batch_size]
  # Compute the posterior probability P(w|g) = sigmoid(relative_surprisal).
  return jax.nn.sigmoid(relative_surprisal) | 
	Compute posterior P(w|g) given likelihoods, mask and prior.
  Args:
    likelihoods_watermarked: shape [batch, length, depth]. Likelihoods
      P(g_values|watermarked) of g-values under watermarked model.
    likelihoods_unwatermarked: shape [batch, length, depth]. Likelihoods
      P(g_values|unwatermarked) of g-values under unwatermarked model.
    mask: A binary array shape [batch, length] indicating which g-values should
      be used. g-values with mask value 0 are discarded.
    prior: Prior probability P(w) that the text is watermarked.
  Returns:
    Posterior probability P(watermarked|g_values), shape [batch].
   | 
	_compute_posterior | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
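To make the log-odds accumulation concrete, a toy call with made-up likelihoods and a uniform prior:

import jax.numpy as jnp

lik_w = jnp.full((1, 4, 2), 0.6)   # P(g | watermarked)
lik_uw = jnp.full((1, 4, 2), 0.5)  # P(g | unwatermarked)
mask = jnp.ones((1, 4))
posterior = _compute_posterior(lik_w, lik_uw, mask, prior=0.5)
# Each g-value contributes log(0.6 / 0.5) ~= 0.182 of relative surprisal;
# summed over 4 * 2 = 8 values this is ~1.459, and sigmoid(1.459) ~= 0.81,
# so posterior ~= [0.81].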
| 
	def __call__(
      self,
      g_values: jnp.ndarray,
      mask: jnp.ndarray,
  ) -> jnp.ndarray:
    """Computes the watermarked posterior P(watermarked|g_values).
    Args:
      g_values: g-values (with values 0 or 1) of shape [batch_size, seq_len,
        watermarking_depth, ...]
      mask: A binary array shape [batch_size, seq_len] indicating which g-values
        should be used. g-values with mask value 0 are discarded.
    Returns:
      P(watermarked | g_values), of shape [batch_size].
    """
    likelihoods_watermarked = self.likelihood_model_watermarked(g_values)
    likelihoods_unwatermarked = self.likelihood_model_unwatermarked(g_values)
    return _compute_posterior(
        likelihoods_watermarked, likelihoods_unwatermarked, mask, self.prior
    ) | 
	Computes the watermarked posterior P(watermarked|g_values).
    Args:
      g_values: g-values (with values 0 or 1) of shape [batch_size, seq_len,
        watermarking_depth, ...]
      mask: A binary array shape [batch_size, seq_len] indicating which g-values
        should be used. g-values with mask value 0 are discarded.
    Returns:
      P(watermarked | g_values), of shape [batch_size].
     | 
	__call__ | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def loss_fn(
    params: Mapping[str, Any],
    detector_inputs: Any,
    w_true: jnp.ndarray,
    l2_batch_weight: float,
    detector_module: BayesianDetectorModule,
) -> jnp.ndarray:
  """Calculates loss for a batch of data given parameters."""
  w_pred = detector_module.apply(
      params, *detector_inputs, method=detector_module.__call__
  )
  unweighted_l2 = detector_module.apply(params, method=detector_module.l2_loss)
  l2_loss = l2_batch_weight * unweighted_l2
  return xentropy_loss(w_true, w_pred) + l2_loss | 
	Calculates loss for a batch of data given parameters. | 
	loss_fn | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def train(
    *,
    detector_module: BayesianDetectorModule,
    g_values: jnp.ndarray,
    mask: jnp.ndarray,
    watermarked: jnp.ndarray,
    epochs: int = 250,
    learning_rate: float = 1e-3,
    minibatch_size: int = 64,
    seed: int = 0,
    l2_weight: float = 0.0,
    shuffle: bool = True,
    g_values_val: Optional[jnp.ndarray] = None,
    mask_val: Optional[jnp.ndarray] = None,
    watermarked_val: Optional[jnp.ndarray] = None,
    verbose: bool = False,
    validation_metric: ValidationMetric = ValidationMetric.TPR_AT_FPR,
) -> tuple[Mapping[int, Mapping[str, PyTree]], float]:
  """Trains a Bayesian detector model.
  Args:
    detector_module: The detector module to train in-place.
    g_values: g-values of shape [num_train, seq_len, watermarking_depth].
    mask: A binary array shape [num_train, seq_len] indicating which g-values
      should be used. g-values with mask value 0 are discarded.
    watermarked: A binary array of shape [num_train] indicating whether the
      example is watermarked (0: unwatermarked, 1: watermarked).
    epochs: Number of epochs to train for.
    learning_rate: Learning rate for optimizer.
    minibatch_size: Minibatch size for training. Note that a minibatch requires
      ~ 32 * minibatch_size * seq_len * watermarking_depth * watermarking_depth
      bits of memory.
    seed: Seed for parameter initialization.
    l2_weight: Weight to apply to L2 regularization for delta parameters.
    shuffle: Whether to shuffle before training.
    g_values_val: Validation g-values of shape [num_val, seq_len,
      watermarking_depth].
    mask_val: Validation mask of shape [num_val, seq_len].
    watermarked_val: Validation watermark labels of shape [num_val].
    verbose: Boolean indicating verbosity of training. If true, the loss will be
      printed. Defaults to False.
    validation_metric: validation metric to use.
  Returns:
    Tuple of
      training_history: Training history keyed by epoch number, where the
        values are dictionaries containing the loss, validation loss, and
        model parameters, keyed by 'loss', 'val_loss', and 'params',
        respectively.
      min_val_loss: Minimum validation loss achieved during training.
  """
  minibatch_inds = jnp.arange(0, len(g_values), minibatch_size)
  minibatch_inds_val = None
  if g_values_val is not None:
    minibatch_inds_val = jnp.arange(0, len(g_values_val), minibatch_size)
  rng = jax.random.PRNGKey(seed)
  param_rng, shuffle_rng = jax.random.split(rng)
  def coshuffle(*args):
    return [jax.random.permutation(shuffle_rng, x) for x in args]
  if shuffle:
    g_values, mask, watermarked = coshuffle(g_values, mask, watermarked)
  def update_fn_if_fpr_tpr(params):
    """Loss function for negative TPR@FPR=1% as the validation loss."""
    tpr_ = tpr_at_fpr(
        params=params,
        detector_inputs=(g_values_val, mask_val),
        w_true=watermarked_val,
        minibatch_size=minibatch_size,
        detector_module=detector_module,
    )
    return -tpr_
  n_minibatches = len(g_values) / minibatch_size
  l2_batch_weight_train = l2_weight / n_minibatches
  l2_batch_weight_val = 0.0
  loss_fn_train = functools.partial(
      loss_fn,
      l2_batch_weight=l2_batch_weight_train,
      detector_module=detector_module,
  )
  loss_fn_jitted_val = jax.jit(
      functools.partial(
          loss_fn,
          l2_batch_weight=l2_batch_weight_val,
          detector_module=detector_module,
      )
  )
  @jax.jit
  def update(gvalues, masks, labels, params, opt_state):
    loss_fn_partialed = functools.partial(
        loss_fn_train,
        detector_inputs=(gvalues, masks),
        w_true=labels,
    )
    loss, grads = jax.value_and_grad(loss_fn_partialed)(params)
    updates, opt_state = optimizer.update(grads, opt_state)
    params = optax.apply_updates(params, updates)
    return loss, params, opt_state
  def update_with_minibatches(gvalues, masks, labels, inds, params, opt_state):
    """Update params iff opt_state is not None and always returns the loss."""
    losses = []
    for start in inds:
      end = start + minibatch_size
      loss, params, opt_state = update(
          gvalues[start:end],
          masks[start:end],
          labels[start:end],
          params,
          opt_state,
      )
      losses.append(loss)
    loss = jnp.mean(jnp.array(losses))
    return loss, params, opt_state
  def validate_with_minibatches(gvalues, masks, labels, inds, params):
    """Update params iff opt_state is not None and always returns the loss."""
    losses = []
    for start in inds:
      end = start + minibatch_size
      loss = loss_fn_jitted_val(
          params,
          detector_inputs=(gvalues[start:end], masks[start:end]),
          w_true=labels[start:end],
      )
      losses.append(loss)
    return jnp.mean(jnp.array(losses))
  def update_fn(opt_state, params):
    """Updates the model parameters and returns the loss."""
    loss, params, opt_state = update_with_minibatches(
        g_values, mask, watermarked, minibatch_inds, params, opt_state
    )
    val_loss = None
    if g_values_val is not None:
      if validation_metric == ValidationMetric.TPR_AT_FPR:
        val_loss = update_fn_if_fpr_tpr(params)
      else:
        val_loss = validate_with_minibatches(
            g_values_val,
            mask_val,
            watermarked_val,
            minibatch_inds_val,
            params,
        )
    return opt_state, params, loss, val_loss
  params = detector_module.params
  if params is None:
    params = detector_module.init(param_rng, g_values[:1], mask[:1])
  optimizer = optax.adam(learning_rate=learning_rate)
  opt_state = optimizer.init(params)
  history = {}
  epochs_completed = 0
  while epochs_completed < epochs:
    opt_state, params, loss, val_loss = update_fn(opt_state, params)
    epochs_completed += 1
    history[epochs_completed] = {
        "loss": loss,
        "val_loss": val_loss,
        "params": params["params"],
    }
    if verbose:
      if val_loss is not None:
        print(
            f"Epoch {epochs_completed}: loss {loss} (train), {val_loss} (val)"
        )
      else:
        print(f"Epoch {epochs_completed}: loss {loss} (train)")
  detector_module.params = params
  val_loss = np.squeeze(
      np.array([history[epoch]["val_loss"] for epoch in range(1, epochs + 1)])
  )
  best_val_epoch = np.argmin(val_loss) + 1
  min_val_loss = val_loss[best_val_epoch - 1]
  print(f"Best val Epoch: {best_val_epoch}, min_val_loss: {min_val_loss}")
  detector_module.params = {"params": history[best_val_epoch]["params"]}
  return history, min_val_loss | 
	Trains a Bayesian detector model.
  Args:
    detector_module: The detector module to train in-place.
    g_values: g-values of shape [num_train, seq_len, watermarking_depth].
    mask: A binary array shape [num_train, seq_len] indicating which g-values
      should be used. g-values with mask value 0 are discarded.
    watermarked: A binary array of shape [num_train] indicating whether the
      example is watermarked (0: unwatermarked, 1: watermarked).
    epochs: Number of epochs to train for.
    learning_rate: Learning rate for optimizer.
    minibatch_size: Minibatch size for training. Note that a minibatch requires
      ~ 32 * minibatch_size * seq_len * watermarking_depth * watermarking_depth
      bits of memory.
    seed: Seed for parameter initialization.
    l2_weight: Weight to apply to L2 regularization for delta parameters.
    shuffle: Whether to shuffle before training.
    g_values_val: Validation g-values of shape [num_val, seq_len,
      watermarking_depth].
    mask_val: Validation mask of shape [num_val, seq_len].
    watermarked_val: Validation watermark labels of shape [num_val].
    verbose: Boolean indicating verbosity of training. If true, the loss will be
      printed. Defaults to False.
    validation_metric: validation metric to use.
  Returns:
    Tuple of
      training_history: Training history keyed by epoch number, where the
        values are dictionaries containing the loss, validation loss, and
        model parameters, keyed by 'loss', 'val_loss', and 'params',
        respectively.
      min_val_loss: Minimum validation loss achieved during training.
   | 
	train | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
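A minimal training sketch under assumed shapes; random g-values stand in for real processed outputs, and reusing the training split for validation here is purely illustrative:

import jax
import jax.numpy as jnp

rng = jax.random.PRNGKey(0)
g_values = jax.random.bernoulli(rng, 0.55, (128, 100, 30)).astype(jnp.float32)
mask = jnp.ones((128, 100))
labels = jnp.concatenate([jnp.ones(64), jnp.zeros(64)])
detector_module = BayesianDetectorModule(watermarking_depth=30)
history, min_val_loss = train(
    detector_module=detector_module,
    g_values=g_values,
    mask=mask,
    watermarked=labels,
    g_values_val=g_values,
    mask_val=mask,
    watermarked_val=labels,
    epochs=10,
    l2_weight=1e-3,
)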
| 
	def update_fn_if_fpr_tpr(params):
    """Loss function for negative TPR@FPR=1% as the validation loss."""
    tpr_ = tpr_at_fpr(
        params=params,
        detector_inputs=(g_values_val, mask_val),
        w_true=watermarked_val,
        minibatch_size=minibatch_size,
        detector_module=detector_module,
    )
    return -tpr_ | 
	Computes negative TPR@FPR=1% for use as the validation loss. | 
	update_fn_if_fpr_tpr | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def update_with_minibatches(gvalues, masks, labels, inds, params, opt_state):
    """Update params iff opt_state is not None and always returns the loss."""
    losses = []
    for start in inds:
      end = start + minibatch_size
      loss, params, opt_state = update(
          gvalues[start:end],
          masks[start:end],
          labels[start:end],
          params,
          opt_state,
      )
      losses.append(loss)
    loss = jnp.mean(jnp.array(losses))
    return loss, params, opt_state | 
	Updates params over all minibatches and returns the mean loss. | 
	update_with_minibatches | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def validate_with_minibatches(gvalues, masks, labels, inds, params):
    """Update params iff opt_state is not None and always returns the loss."""
    losses = []
    for start in inds:
      end = start + minibatch_size
      loss = loss_fn_jitted_val(
          params,
          detector_inputs=(gvalues[start:end], masks[start:end]),
          w_true=labels[start:end],
      )
      losses.append(loss)
    return jnp.mean(jnp.array(losses)) | 
	Computes the mean validation loss over all minibatches. | 
	validate_with_minibatches | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def update_fn(opt_state, params):
    """Updates the model parameters and returns the loss."""
    loss, params, opt_state = update_with_minibatches(
        g_values, mask, watermarked, minibatch_inds, params, opt_state
    )
    val_loss = None
    if g_values_val is not None:
      if validation_metric == ValidationMetric.TPR_AT_FPR:
        val_loss = update_fn_if_fpr_tpr(params)
      else:
        val_loss = validate_with_minibatches(
            g_values_val,
            mask_val,
            watermarked_val,
            minibatch_inds_val,
            params,
        )
    return opt_state, params, loss, val_loss | 
	Updates the model parameters and returns the loss. | 
	update_fn | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def score(self, outputs: torch.Tensor) -> jnp.ndarray:
    """Scores the model output for the possibility of being watermarked.
    Score is within [0, 1] where 0 is not watermarked and 1 is watermarked.
    Args:
      outputs: model output of shape [batch_size, output_len]
    Returns:
      scores of shape [batch_size]
    """
    # eos mask is computed, skip first ngram_len - 1 tokens
    # eos_mask will be of shape [batch_size, output_len]
    eos_token_mask = self.logits_processor.compute_eos_token_mask(
        input_ids=outputs,
        eos_token_id=self.tokenizer.eos_token_id,
    )[:, self.logits_processor.ngram_len - 1 :]
    # context repetition mask is computed
    context_repetition_mask = (
        self.logits_processor.compute_context_repetition_mask(
            input_ids=outputs,
        )
    )
    # context repetition mask shape [batch_size, output_len - (ngram_len - 1)]
    combined_mask = context_repetition_mask * eos_token_mask
    g_values = self.logits_processor.compute_g_values(
        input_ids=outputs,
    )
    # g values shape [batch_size, output_len - (ngram_len - 1), depth]
    return self.detector_module.score(
        g_values.cpu().numpy(), combined_mask.cpu().numpy()
    ) | 
	Scores the model output for the possibility of being watermarked.
    Score is within [0, 1] where 0 is not watermarked and 1 is watermarked.
    Args:
      outputs: model output of shape [batch_size, output_len]
    Returns:
      scores of shape [batch_size]
     | 
	score | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def process_raw_model_outputs(
      cls,
      *,
      tokenized_wm_outputs: Union[Sequence[np.ndarray], np.ndarray],
      tokenized_uwm_outputs: Union[Sequence[np.ndarray], np.ndarray],
      logits_processor: logits_processing.SynthIDLogitsProcessor,
      tokenizer: Any,
      torch_device: torch.device,
      test_size: float = 0.3,
      pos_truncation_length: Optional[int] = 200,
      neg_truncation_length: Optional[int] = 100,
      max_padded_length: int = 2300,
  ) -> tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
    """Process raw models outputs into inputs we can train.
    Args:
      tokenized_wm_outputs: tokenized outputs of watermarked data.
      tokenized_uwm_outputs: tokenized outputs of unwatermarked data.
      logits_processor: logits processor used for watermarking.
      tokenizer: tokenizer used for the model.
      torch_device: torch device to use.
      test_size: test size to use for train-test split.
      pos_truncation_length: Length to truncate wm outputs. If None, no
        truncation is applied.
      neg_truncation_length: Length to truncate uwm outputs. If None, no
        truncation is applied.
      max_padded_length: Length to pad truncated outputs so that all processed
        entries have same shape.
    Returns:
      Tuple of train_g_values, train_masks, train_labels, cv_g_values, cv_masks,
        cv_labels
    """
    # Split data into train and CV
    train_wm_outputs, cv_wm_outputs = model_selection.train_test_split(
        tokenized_wm_outputs, test_size=test_size
    )
    train_uwm_outputs, cv_uwm_outputs = model_selection.train_test_split(
        tokenized_uwm_outputs, test_size=test_size
    )
    # Process both train and CV data for training
    wm_masks_train, wm_g_values_train = process_outputs_for_training(
        [
            torch.tensor(outputs, device=torch_device, dtype=torch.long)
            for outputs in train_wm_outputs
        ],
        logits_processor=logits_processor,
        tokenizer=tokenizer,
        pos_truncation_length=pos_truncation_length,
        neg_truncation_length=neg_truncation_length,
        max_length=max_padded_length,
        is_pos=True,
        is_cv=False,
        torch_device=torch_device,
    )
    wm_masks_cv, wm_g_values_cv = process_outputs_for_training(
        [
            torch.tensor(outputs, device=torch_device, dtype=torch.long)
            for outputs in cv_wm_outputs
        ],
        logits_processor=logits_processor,
        tokenizer=tokenizer,
        pos_truncation_length=pos_truncation_length,
        neg_truncation_length=neg_truncation_length,
        max_length=max_padded_length,
        is_pos=True,
        is_cv=True,
        torch_device=torch_device,
    )
    uwm_masks_train, uwm_g_values_train = process_outputs_for_training(
        [
            torch.tensor(outputs, device=torch_device, dtype=torch.long)
            for outputs in train_uwm_outputs
        ],
        logits_processor=logits_processor,
        tokenizer=tokenizer,
        pos_truncation_length=pos_truncation_length,
        neg_truncation_length=neg_truncation_length,
        max_length=max_padded_length,
        is_pos=False,
        is_cv=False,
        torch_device=torch_device,
    )
    uwm_masks_cv, uwm_g_values_cv = process_outputs_for_training(
        [
            torch.tensor(outputs, device=torch_device, dtype=torch.long)
            for outputs in cv_uwm_outputs
        ],
        logits_processor=logits_processor,
        tokenizer=tokenizer,
        pos_truncation_length=pos_truncation_length,
        neg_truncation_length=neg_truncation_length,
        max_length=max_padded_length,
        is_pos=False,
        is_cv=True,
        torch_device=torch_device,
    )
    # We get list of data; here we concat all together to be passed to the
    # detector.
    wm_masks_train = torch.cat(wm_masks_train, dim=0)
    wm_g_values_train = torch.cat(wm_g_values_train, dim=0)
    wm_labels_train = torch.ones((wm_masks_train.shape[0],), dtype=torch.bool)
    wm_masks_cv = torch.cat(wm_masks_cv, dim=0)
    wm_g_values_cv = torch.cat(wm_g_values_cv, dim=0)
    wm_labels_cv = torch.ones((wm_masks_cv.shape[0],), dtype=torch.bool)
    uwm_masks_train = torch.cat(uwm_masks_train, dim=0)
    uwm_g_values_train = torch.cat(uwm_g_values_train, dim=0)
    uwm_labels_train = torch.zeros(
        (uwm_masks_train.shape[0],), dtype=torch.bool
    )
    uwm_masks_cv = torch.cat(uwm_masks_cv, dim=0)
    uwm_g_values_cv = torch.cat(uwm_g_values_cv, dim=0)
    uwm_labels_cv = torch.zeros((uwm_masks_cv.shape[0],), dtype=torch.bool)
    # Concat pos and negatives data together.
    train_g_values = (
        torch.cat((wm_g_values_train, uwm_g_values_train), dim=0).cpu().numpy()
    )
    train_labels = (
        torch.cat((wm_labels_train, uwm_labels_train), axis=0).cpu().numpy()
    )
    train_masks = (
        torch.cat((wm_masks_train, uwm_masks_train), axis=0).cpu().numpy()
    )
    cv_g_values = (
        torch.cat((wm_g_values_cv, uwm_g_values_cv), axis=0).cpu().numpy()
    )
    cv_labels = torch.cat((wm_labels_cv, uwm_labels_cv), axis=0).cpu().numpy()
    cv_masks = torch.cat((wm_masks_cv, uwm_masks_cv), axis=0).cpu().numpy()
    # Free up GPU memory.
    del (
        wm_g_values_train,
        wm_labels_train,
        wm_masks_train,
        wm_g_values_cv,
        wm_labels_cv,
        wm_masks_cv,
    )
    gc.collect()
    torch.cuda.empty_cache()
    # Squeeze out singleton dimensions, then shuffle the data.
    train_g_values = jnp.squeeze(train_g_values)
    train_labels = jnp.squeeze(train_labels)
    train_masks = jnp.squeeze(train_masks)
    cv_g_values = jnp.squeeze(cv_g_values)
    cv_labels = jnp.squeeze(cv_labels)
    cv_masks = jnp.squeeze(cv_masks)
    shuffled_idx = list(range(train_g_values.shape[0]))
    shuffled_idx = np.array(shuffled_idx)
    np.random.shuffle(shuffled_idx)
    train_g_values = train_g_values[shuffled_idx]
    train_labels = train_labels[shuffled_idx]
    train_masks = train_masks[shuffled_idx]
    shuffled_idx = list(range(cv_g_values.shape[0]))
    shuffled_idx = np.array(shuffled_idx)
    np.random.shuffle(shuffled_idx)
    cv_g_values = cv_g_values[shuffled_idx]
    cv_labels = cv_labels[shuffled_idx]
    cv_masks = cv_masks[shuffled_idx]
    return (
        train_g_values,
        train_masks,
        train_labels,
        cv_g_values,
        cv_masks,
        cv_labels,
    ) | 
	Process raw model outputs into inputs we can train on.
    Args:
      tokenized_wm_outputs: tokenized outputs of watermarked data.
      tokenized_uwm_outputs: tokenized outputs of unwatermarked data.
      logits_processor: logits processor used for watermarking.
      tokenizer: tokenizer used for the model.
      torch_device: torch device to use.
      test_size: test size to use for train-test split.
      pos_truncation_length: Length to truncate wm outputs. If None, no
        truncation is applied.
      neg_truncation_length: Length to truncate uwm outputs. If None, no
        truncation is applied.
      max_padded_length: Length to pad truncated outputs so that all processed
        entries have same shape.
    Returns:
      Tuple of train_g_values, train_masks, train_labels, cv_g_values, cv_masks,
        cv_labels
     | 
	process_raw_model_outputs | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def train_best_detector_given_g_values(
      cls,
      *,
      train_g_values: jnp.ndarray,
      train_masks: jnp.ndarray,
      train_labels: jnp.ndarray,
      cv_g_values: jnp.ndarray,
      cv_masks: jnp.ndarray,
      cv_labels: jnp.ndarray,
      logits_processor: logits_processing.SynthIDLogitsProcessor,
      tokenizer: Any,
      n_epochs: int = 50,
      learning_rate: float = 2.1e-2,
      l2_weights: np.ndarray = np.logspace(-3, -2, num=4),
      verbose: bool = False,
  ) -> tuple["BayesianDetector", float]:
    """Train best detector given g_values, mask and labels."""
    best_detector = None
    lowest_loss = float("inf")
    val_losses = []
    for l2_weight in l2_weights:
      detector_module = BayesianDetectorModule(
          watermarking_depth=len(logits_processor.keys),
      )
      _, min_val_loss = train(
          detector_module=detector_module,
          g_values=train_g_values,
          mask=train_masks,
          watermarked=train_labels,
          g_values_val=cv_g_values,
          mask_val=cv_masks,
          watermarked_val=cv_labels,
          learning_rate=learning_rate,
          l2_weight=l2_weight,
          epochs=n_epochs,
          verbose=verbose,
      )
      val_losses.append(min_val_loss)
      if min_val_loss < lowest_loss:
        lowest_loss = min_val_loss
        best_detector = detector_module
    return cls(logits_processor, tokenizer, best_detector.params), lowest_loss | 
	Train best detector given g_values, mask and labels. | 
	train_best_detector_given_g_values | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def train_best_detector(
      cls,
      *,
      tokenized_wm_outputs: Union[Sequence[np.ndarray], np.ndarray],
      tokenized_uwm_outputs: Union[Sequence[np.ndarray], np.ndarray],
      logits_processor: logits_processing.SynthIDLogitsProcessor,
      tokenizer: Any,
      torch_device: torch.device,
      test_size: float = 0.3,
      pos_truncation_length: Optional[int] = 200,
      neg_truncation_length: Optional[int] = 100,
      max_padded_length: int = 2300,
      n_epochs: int = 50,
      learning_rate: float = 2.1e-2,
      l2_weights: np.ndarray = np.logspace(-3, -2, num=4),
      verbose: bool = False,
  ) -> tuple["BayesianDetector", float]:
    """Construct, train and return the best detector based on wm and uwm data.
    In practice, we have found that tuning pos_truncation_length,
    neg_truncation_length, n_epochs, learning_rate and l2_weights can help
    improve the performance of the detector. We recommend tuning these
    parameters for your data.
    Args:
      tokenized_wm_outputs: tokenized outputs of watermarked data.
      tokenized_uwm_outputs: tokenized outputs of unwatermarked data.
      logits_processor: logits processor used for watermarking.
      tokenizer: tokenizer used for the model.
      torch_device: torch device to use.
      test_size: test size to use for train-test split.
      pos_truncation_length: Length to truncate wm outputs. If None, no
        truncation is applied.
      neg_truncation_length: Length to truncate uwm outputs. If None, no
        truncation is done.
      max_padded_length: Length to pad truncated outputs so that all processed
        entries have same shape.
      n_epochs: Number of epochs to train the detector.
      learning_rate: Learning rate to use for training the detector.
      l2_weights: L2 weights to use for training the detector.
      verbose: Whether to print training progress.
    Returns:
      Tuple of trained detector and loss achieved on CV data.
    """
    if torch_device.type in ("cuda", "tpu"):
      raise ValueError(
          "We have found the training unstable on CPUs; we are working on"
          " a fix. Use GPU or TPU for training."
      )
    (
        train_g_values,
        train_masks,
        train_labels,
        cv_g_values,
        cv_masks,
        cv_labels,
    ) = cls.process_raw_model_outputs(
        tokenized_wm_outputs=tokenized_wm_outputs,
        tokenized_uwm_outputs=tokenized_uwm_outputs,
        logits_processor=logits_processor,
        tokenizer=tokenizer,
        torch_device=torch_device,
        test_size=test_size,
        pos_truncation_length=pos_truncation_length,
        neg_truncation_length=neg_truncation_length,
        max_padded_length=max_padded_length,
    )
    return cls.train_best_detector_given_g_values(
        train_g_values=train_g_values,
        train_masks=train_masks,
        train_labels=train_labels,
        cv_g_values=cv_g_values,
        cv_masks=cv_masks,
        cv_labels=cv_labels,
        logits_processor=logits_processor,
        tokenizer=tokenizer,
        verbose=verbose,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        l2_weights=l2_weights,
    ) | 
	Construct, train and return the best detector based on wm and uwm data.
    In practice, we have found that tuning pos_truncation_length,
    neg_truncation_length, n_epochs, learning_rate and l2_weights can help
    improve the performance of the detector. We recommend tuning these
    parameters for your data.
    Args:
      tokenized_wm_outputs: tokenized outputs of watermarked data.
      tokenized_uwm_outputs: tokenized outputs of unwatermarked data.
      logits_processor: logits processor used for watermarking.
      tokenizer: tokenizer used for the model.
      torch_device: torch device to use.
      test_size: test size to use for train-test split.
      pos_truncation_length: Length to truncate wm outputs. If None, no
        truncation is applied.
      neg_truncation_length: Length to truncate uwm outputs. If None, no
        truncation is done.
      max_padded_length: Length to pad truncated outputs so that all processed
        entries have same shape.
      n_epochs: Number of epochs to train the detector.
      learning_rate: Learning rate to use for training the detector.
      l2_weights: L2 weights to use for training the detector.
      verbose: Whether to print training progress.
    Returns:
      Tuple of trained detector and loss achieved on CV data.
     | 
	train_best_detector | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_bayesian.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_bayesian.py | 
	Apache-2.0 | 
| 
	def mean_score(
    g_values: jnp.ndarray,
    mask: jnp.ndarray,
) -> jnp.ndarray:
  """Computes the Mean score.
  Args:
    g_values: g-values of shape [batch_size, seq_len, watermarking_depth].
    mask: A binary array shape [batch_size, seq_len] indicating which g-values
      should be used. g-values with mask value 0 are discarded.
  Returns:
    Mean scores, of shape [batch_size]. This is the mean of the unmasked
      g-values.
  """
  watermarking_depth = g_values.shape[-1]
  num_unmasked = jnp.sum(mask, axis=1)  # shape [batch_size]
  return jnp.sum(g_values * jnp.expand_dims(mask, 2), axis=(1, 2)) / (
      watermarking_depth * num_unmasked
  ) | 
	Computes the Mean score.
  Args:
    g_values: g-values of shape [batch_size, seq_len, watermarking_depth].
    mask: A binary array shape [batch_size, seq_len] indicating which g-values
      should be used. g-values with mask value 0 are discarded.
  Returns:
    Mean scores, of shape [batch_size]. This is the mean of the unmasked
      g-values.
   | 
	mean_score | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_mean.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_mean.py | 
	Apache-2.0 | 
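A worked example with depth 2 and the last position masked out: the score is the mean of the remaining g-values.

import jax.numpy as jnp

g_values = jnp.array([[[1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]])  # [1, 3, 2]
mask = jnp.array([[1.0, 1.0, 0.0]])  # third position discarded
score = mean_score(g_values, mask)
# Unmasked g-values are 1, 0, 1, 1, so score == 3 / (2 * 2) = [0.75].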
| 
	def weighted_mean_score(
    g_values: jnp.ndarray,
    mask: jnp.ndarray,
    weights: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
  """Computes the Weighted Mean score.
  Args:
    g_values: g-values of shape [batch_size, seq_len, watermarking_depth].
    mask: A binary array shape [batch_size, seq_len] indicating which g-values
      should be used. g-values with mask value 0 are discarded.
    weights: array of non-negative floats, shape [watermarking_depth]. The
      weights to be applied to the g-values. If not supplied, defaults to
      linearly decreasing weights from 10 to 1.
  Returns:
    Weighted Mean scores, of shape [batch_size]. This is the mean of the
      unmasked g-values, re-weighted using weights.
  """
  watermarking_depth = g_values.shape[-1]
  if weights is None:
    weights = jnp.linspace(start=10, stop=1, num=watermarking_depth)
  # Normalise weights so they sum to watermarking_depth.
  weights *= watermarking_depth / jnp.sum(weights)
  # Apply weights to g-values.
  g_values *= jnp.expand_dims(weights, axis=(0, 1))
  num_unmasked = jnp.sum(mask, axis=1)  # shape [batch_size]
  return jnp.sum(g_values * jnp.expand_dims(mask, 2), axis=(1, 2)) / (
      watermarking_depth * num_unmasked
  ) | 
	Computes the Weighted Mean score.
  Args:
    g_values: g-values of shape [batch_size, seq_len, watermarking_depth].
    mask: A binary array shape [batch_size, seq_len] indicating which g-values
      should be used. g-values with mask value 0 are discarded.
    weights: array of non-negative floats, shape [watermarking_depth]. The
      weights to be applied to the g-values. If not supplied, defaults to
      linearly decreasing weights from 10 to 1.
  Returns:
    Weighted Mean scores, of shape [batch_size]. This is the mean of the
      unmasked g-values, re-weighted using weights.
   | 
	weighted_mean_score | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/detector_mean.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/detector_mean.py | 
	Apache-2.0 | 
| 
	def expected_mean_g_value(
    vocab_size: int,
    num_leaves: int = 2,
) -> float:
  """Compute expected mean g-value after watermarking, assuming uniform LM dist.
  This is the theoretical expected value for a single layer of tournament
  watermarking, using a Bernoulli(0.5) g-value distribution and N=num_leaves
  samples, assuming that the LM distribution p_LM is uniform.
  Args:
    vocab_size: The size of the vocabulary.
    num_leaves: Number of leaves per node in the tournament tree (N in the
      paper).
  Returns:
    The expected mean g-value for watermarked text.
  """
  if num_leaves == 2:
    # This equation is from Corollary 27 in Supplementary Information of paper,
    # in the case where p_LM is uniform.
    return 0.5 + 0.25 * (1 - (1 / vocab_size))
  elif num_leaves == 3:
    # This case can be derived from Theorem 25 in Supplementary Information of
    # the paper, in the case where N=3 and p_LM is uniform.
    return 7 / 8 - (3 / (8 * vocab_size))
  else:
    raise ValueError(
        f'Only 2 or 3 leaves are supported for the expected mean g-value'
        f' computation, but got {num_leaves}.'
    ) | 
	Compute expected mean g-value after watermarking, assuming uniform LM dist.
	This is the theoretical expected value for a single layer of tournament
  watermarking, using a Bernoulli(0.5) g-value distribution and N=num_leaves
  samples, assuming that the LM distribution p_LM is uniform.
  Args:
    vocab_size: The size of the vocabulary.
    num_leaves: Number of leaves per node in the tournament tree (N in the
      paper).
  Returns:
    The expected mean g-value for watermarked text.
   | 
	expected_mean_g_value | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/g_value_expectations.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/g_value_expectations.py | 
	Apache-2.0 | 
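Plugging a toy vocabulary size into the formulas above shows the expectation rising past the unwatermarked mean of 0.5:

expected_mean_g_value(vocab_size=4)  # 0.5 + 0.25 * (1 - 1/4) = 0.6875
expected_mean_g_value(vocab_size=4, num_leaves=3)  # 7/8 - 3/32 = 0.78125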
| 
	def accumulate_hash(
    current_hash: torch.LongTensor,
    data: torch.LongTensor,
    multiplier: int = 6364136223846793005,
    increment: int = 1,
) -> torch.LongTensor:
  """Accumulate hash of data on current hash.
  This method uses an adapted linear congruential generator with newlib/musl
  parameters. It has the following property:
  f(x, data[:T]) = f(f(x, data[:T - 1]), data[T - 1:])
  current_hash.shape and data.shape[:-1] must match or be broadcastable.
  Args:
    current_hash: (shape,)
    data: (shape, tensor_len)
    multiplier: (int) multiplier of linear congruential generator
    increment: (int) increment of linear congruential generator
  Returns:
    updated hash (shape,)
  """
  for i in range(data.shape[-1]):
    current_hash = torch.add(current_hash, data[..., i])
    current_hash = torch.mul(current_hash, multiplier)
    current_hash = torch.add(current_hash, increment)
  return current_hash | 
	Accumulate hash of data on current hash.
  This method uses an adapted linear congruential generator with newlib/musl
  parameters. It has the following property:
  f(x, data[:T]) = f(f(x, data[:T - 1]), data[T - 1:])
  current_hash.shape and data.shape[:-1] must match or be broadcastable.
  Args:
    current_hash: (shape,)
    data: (shape, tensor_len)
    multiplier: (int) multiplier of linear congruential generator
    increment: (int) increment of linear congruential generator
  Returns:
    updated hash (shape,)
   | 
	accumulate_hash | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/hashing_function.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/hashing_function.py | 
	Apache-2.0 | 
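A quick check of the prefix property from the docstring: hashing a full sequence equals hashing its prefix and then the final element.

import torch

key = torch.tensor([12345], dtype=torch.long)
data = torch.randint(0, 1000, (1, 5), dtype=torch.long)
full = accumulate_hash(key, data)
prefix = accumulate_hash(key, data[..., :4])
assert torch.equal(full, accumulate_hash(prefix, data[..., 4:]))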
| 
	def update_scores(
    scores: torch.FloatTensor,
    g_values: torch.FloatTensor,
) -> torch.FloatTensor:
  """Updates scores using the g values.
  We assume that the scores are in the log space.
  Args:
    scores: Scores (batch_size, vocab_size).
    g_values: G values (batch_size, vocab_size, depth).
  Returns:
    Updated scores (batch_size, vocab_size).
  """
  _, _, depth = g_values.shape
  device = scores.device
  probs = torch.softmax(scores, dim=1)
  for i in range(depth):
    g_values_at_depth = g_values[:, :, i]
    g_mass_at_depth = (g_values_at_depth * probs).sum(axis=1, keepdims=True)
    probs = probs * (1 + g_values_at_depth - g_mass_at_depth)
  log_probs = torch.log(probs)
  log_probs = torch.where(
      torch.isfinite(log_probs), log_probs, torch.tensor(-1e12, device=device)
  )
  return log_probs | 
	Updates scores using the g values.
  We assume that the scores are in the log space.
  Args:
    scores: Scores (batch_size, vocab_size).
    g_values: G values (batch_size, vocab_size, depth).
  Returns:
    Updated scores (batch_size, vocab_size).
   | 
	update_scores | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
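A single-depth example over a uniform distribution: with g-mass 0.5, tokens with g=1 are scaled by 1.5 and tokens with g=0 by 0.5, so the result is still a valid distribution.

import torch

scores = torch.log(torch.tensor([[0.25, 0.25, 0.25, 0.25]]))
g_values = torch.tensor([[[1.0], [1.0], [0.0], [0.0]]])  # depth 1
updated = update_scores(scores, g_values)
# torch.exp(updated) ~= [[0.375, 0.375, 0.125, 0.125]], which sums to 1.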
| 
	def update_scores_distortionary(
    scores: torch.FloatTensor,
    g_values: torch.FloatTensor,
    num_leaves: int,
) -> torch.FloatTensor:
  """Update scores using the g values for distortionary tournament watermarking.
  We assume that the scores are in the log space.
  Args:
    scores: Scores (batch_size, vocab_size).
    g_values: G values (batch_size, vocab_size, depth).
    num_leaves: Number of leaves per node in the tournament tree.
  Returns:
    Updated scores (batch_size, vocab_size).
  """
  _, _, depth = g_values.shape
  device = scores.device
  probs = torch.softmax(scores, dim=1)
  for i in range(depth):
    g_values_at_depth = g_values[:, :, i]
    g_mass_at_depth = (g_values_at_depth * probs).sum(axis=1, keepdims=True)
    coeff_not_in_g = (1 - g_mass_at_depth)**(num_leaves - 1)
    coeff_in_g = (1 - (1 - g_mass_at_depth)**(num_leaves)) / g_mass_at_depth
    coeffs = torch.where(
        torch.logical_and(g_values_at_depth == 1, probs > 0),
        coeff_in_g, coeff_not_in_g)
    probs = probs * coeffs
  log_probs = torch.log(probs)
  log_probs = torch.where(
      torch.isfinite(log_probs), log_probs, torch.tensor(-1e12, device=device)
  )
  return log_probs | 
	Update scores using the g values for distortionary tournament watermarking.
  We assume that the scores are in the log space.
  Args:
    scores: Scores (batch_size, vocab_size).
    g_values: G values (batch_size, vocab_size, depth).
    num_leaves: Number of leaves per node in the tournament tree.
  Returns:
    Updated scores (batch_size, vocab_size).
   | 
	update_scores_distortionary | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
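With num_leaves=2 the coefficients reduce to 2 - m for g=1 tokens and 1 - m for g=0 tokens (m being the g-mass), which matches update_scores; a small sanity check of that observation:

import torch

scores = torch.log(torch.tensor([[0.25, 0.25, 0.25, 0.25]]))
g_values = torch.tensor([[[1.0], [1.0], [0.0], [0.0]]])
assert torch.allclose(
    update_scores_distortionary(scores, g_values, num_leaves=2),
    update_scores(scores, g_values),
)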
| 
	def __init__(
      self,
      batch_size: int,
      ngram_len: int,
      context_history_size: int,
      device: torch.device,
  ):
    """Initializes the state.
    Args:
      batch_size: Batch size.
      ngram_len: Ngram length.
      context_history_size: Size of the tensor to keep track of seen contexts.
      device: Device to use.
    """
    self.context = torch.zeros(
        (batch_size, ngram_len - 1),
        dtype=torch.int64,
        device=device,
    )
    self.context_history = torch.zeros(
        (batch_size, context_history_size),
        dtype=torch.int64,
        device=device,
    )
    self.num_calls = 0 | 
	Initializes the state.
    Args:
      batch_size: Batch size.
      ngram_len: Ngram length.
      context_history_size: Size of the tensor to keep track of seen contexts.
      device: Device to use.
     | 
	__init__ | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
	def __init__(
      self,
      *,
      ngram_len: int,
      keys: Sequence[int],
      sampling_table_size: int,
      sampling_table_seed: int,
      context_history_size: int,
      temperature: float,
      top_k: int,
      device: torch.device,
      skip_first_ngram_calls: bool = False,
      apply_top_k: bool = True,
      num_leaves: int = 2
  ):
    """Initializes the logits processor.
    Args:
      ngram_len: Ngram length.
      keys: A sequence of watermarking keys, one for each depth.
      sampling_table_size: Size of the sampling table.
      sampling_table_seed: Random seed to generate the sampling table.
      context_history_size: Size of the tensor to keep track of seen contexts.
      temperature: Temperature to use for scaling the scores.
      top_k: Top k to use for sampling the scores.
      device: Device to use.
      skip_first_ngram_calls: Whether to skip first ngram calls.
      apply_top_k: Whether to apply top k to the scores.
      num_leaves: Number of leaves per node in the tournament tree.
    """
    self.ngram_len = ngram_len
    self.keys = torch.tensor(keys, device=device)
    generator = torch.Generator(device=device).manual_seed(sampling_table_seed)
    # A random sampling table is pre-computed and modulo table size is applied
    # to map from a hash of ngram keys to g values, this is similar to the
    # hashtable implementation used in
    # https://github.com/facebookresearch/three_bricks. We note that the
    # hashing employed in this repository is different from that used to
    # watermark the Gemini App, and hence the detectors trained based on the
    # hashing in this repository will not transfer to text generated by
    # the Gemini App.
    self.sampling_table = torch.randint(
        low=0,
        high=2,
        size=(sampling_table_size,),
        generator=generator,
        device=device,
    )
    self.context_history_size = context_history_size
    self.device = device
    self.state = None
    self.skip_first_ngram_calls = skip_first_ngram_calls
    self.apply_top_k = apply_top_k
    # Check validity of temperature.
    if not (isinstance(temperature, float) and temperature > 0):
      except_msg = (
          f"`temperature` (={temperature}) has to be a strictly positive float,"
          " otherwise your next token scores will be invalid."
      )
      if isinstance(temperature, float) and temperature == 0.0:
        except_msg += (
            " If you're looking for greedy decoding strategies, set"
            " `do_sample=False`."
        )
      raise ValueError(except_msg)
    self.temperature = temperature
    self._num_leaves = num_leaves
    # Check validity of top_k.
    if not (isinstance(top_k, int) and top_k > 1):
      raise ValueError(f"`top_k` has to be > 1, but is {top_k}")
    self.top_k = top_k | 
	Initializes the logits processor.
    Args:
      ngram_len: Ngram length.
      keys: A sequence of watermarking keys, one for each depth.
      sampling_table_size: Size of the sampling table.
      sampling_table_seed: Random seed to generate the sampling table.
      context_history_size: Size of the tensor to keep track of seen contexts.
      temperature: Temperature to use for scaling the scores.
      top_k: Top k to use for sampling the scores.
      device: Device to use.
      skip_first_ngram_calls: Whether to skip first ngram calls.
      apply_top_k: Whether to apply top k to the scores.
      num_leaves: Number of leaves per node in the tournament tree.
     | 
	__init__ | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
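A minimal construction sketch for the processor initialized above, assuming the package is importable as synthid_text; the keyword values are illustrative placeholders, not recommended settings.

import torch
from synthid_text import logits_processing

device = torch.device("cpu")
logits_processor = logits_processing.SynthIDLogitsProcessor(
    ngram_len=5,                    # watermark each 5-gram
    keys=[101, 202, 303, 404],      # one watermarking key per depth
    sampling_table_size=2**16,
    sampling_table_seed=0,
    context_history_size=1024,      # window for skipping repeated contexts
    temperature=0.7,
    top_k=40,
    device=device,
)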
	def watermarked_call(
      self,
      input_ids: torch.LongTensor,
      scores: torch.FloatTensor,
  ) -> tuple[torch.FloatTensor, torch.LongTensor, torch.FloatTensor]:
    """Calls the logits processor statefully.
    This function computes top_k internally and returns the indices mapping
    from top_k scores to dense scores.
    Args:
      input_ids: Input token ids (batch_size, inputs_len).
      scores: Scores (batch_size, vocab_size).
    Returns:
      Tuple of
        Watermarked updated scores (batch_size, top_k)
        Top k indices (batch_size, top_k).
        original scores for perplexity calculations (batch_size, top_k)
    """
    self._check_input_ids_shape(input_ids)
    scores_processed = scores / self.temperature
    top_k_result = torch.topk(scores_processed, k=self.top_k, dim=1)
    batch_size, vocab_size = scores.shape
    if self.apply_top_k:
      scores_top_k = top_k_result.values
      # scores_top_k shape [batch_size, top_k]
      top_k_indices = top_k_result.indices
      # top_k_indices shape [batch_size, top_k]
    else:
      scores_top_k = scores_processed
      top_k_indices = torch.stack([
          torch.arange(vocab_size, device=self.device)
          for _ in range(batch_size)
      ])
    device = scores.device
    if str(device) != str(self.device):
      raise ValueError(
          "SynthIDLogitsProcessor received inputs with unexpected device.",
      )
    if self.state is None:
      # Initialize watermarking state if it does not exist.
      self._init_state(batch_size)
    else:
      # Append last input id (which is the input id added in last call) to the
      # previous context so we have the context to be used for current
      # watermarking.
      self.state.context = torch.concat(
          (self.state.context, input_ids[:, -1:]),
          dim=1,
      )
      self.state.context = self.state.context[:, 1:]
    assert self.state is not None
    self.state.num_calls += 1
    # Don't watermark the first ngram_len - 1 tokens if set.
    if self.skip_first_ngram_calls and self.state.num_calls < self.ngram_len:
      return scores_top_k, top_k_indices, scores_top_k
    # 2. Generate random keys for each ngram key combination.
    ngram_keys, hash_result_with_just_context = self._compute_keys(
        self.state.context, top_k_indices
    )
    # ngram_keys shape [batch_size, top_k, depth]
    # 3. Sample g values.
    g_values = self.sample_g_values(ngram_keys)
    # g_values shape [batch_size, top_k, depth]
    # 4. Modify scores.
    if self._num_leaves == 2:
      updated_scores = update_scores(scores_top_k, g_values)
    else:
      updated_scores = update_scores_distortionary(
          scores_top_k, g_values, self._num_leaves
      )
    # updated scores shape [batch_size, top_k]
    # 5. Check if the current watermarking context was previously used, if
    # yes skip watermarking.
    hash_result_with_just_context = hash_result_with_just_context[:, None]
    is_repeated_context = (
        self.state.context_history == hash_result_with_just_context
    ).any(
        dim=1,
        keepdim=True,
    )
    self.state.context_history = torch.concat(
        (hash_result_with_just_context, self.state.context_history),
        dim=1,
    )[:, :-1]
    updated_watermarked_scores = torch.where(
        is_repeated_context,
        input=scores_top_k,
        other=updated_scores,
    )
    return updated_watermarked_scores, top_k_indices, scores_top_k | 
	Calls the logits processor statefully.
    This function computes top_k internally and returns the indices mapping
    from top_k scores to dense scores.
    Args:
      input_ids: Input token ids (batch_size, inputs_len).
      scores: Scores (batch_size, vocab_size).
    Returns:
      Tuple of
        Watermarked updated scores (batch_size, top_k)
        Top k indices (batch_size, top_k).
        original scores for perplexity calculations (batch_size, top_k)
     | 
	watermarked_call | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
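A sketch of one watermarked decoding step built on watermarked_call above, reusing the logits_processor constructed earlier; torch.take_along_dim stands in for the vmap(torch.take) remapping used elsewhere in the repository.

batch_size, vocab_size = 2, 256
input_ids = torch.randint(0, vocab_size, (batch_size, 4), device=device)
scores = torch.randn(batch_size, vocab_size, device=device)

# Watermark the top_k scores, then sample and remap to dense vocabulary ids.
watermarked, top_k_indices, _ = logits_processor.watermarked_call(
    input_ids, scores
)
probs = torch.nn.functional.softmax(watermarked, dim=-1)
sampled = torch.multinomial(probs, num_samples=1)                  # indices into top_k
next_tokens = torch.take_along_dim(top_k_indices, sampled, dim=1)  # dense ids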
	def compute_ngram_keys(
      self,
      ngrams: torch.LongTensor,
  ) -> torch.LongTensor:
    """Computes random keys for each ngram and depth.
    Args:
      ngrams: Ngrams (batch_size, num_ngrams, ngram_len).
    Returns:
      ngram keys (batch_size, num_ngrams, depth).
    """
    if len(ngrams.shape) != 3:
      raise ValueError(
          "Ngrams should be of shape (batch_size, num_ngrams, ngram_len), but"
          f" is {ngrams.shape}"
      )
    if ngrams.shape[2] != self.ngram_len:
      raise ValueError(
          "Ngrams should be of shape (batch_size, num_ngrams, ngram_len),"
          f" where ngram_len is {self.ngram_len}, but is {ngrams.shape}"
      )
    batch_size, _, _ = ngrams.shape
    hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)
    # hash_result shape [batch_size,]
    # ngrams shape [batch_size, num_ngrams, ngram_len]
    hash_result = torch.vmap(
        hashing_function.accumulate_hash, in_dims=(None, 1), out_dims=1
    )(hash_result, ngrams)
    # hash_result shape [batch_size, num_ngrams]
    keys = self.keys[None, None, :, None]
    # hash_result shape [batch_size, num_ngrams]
    # keys shape [1, 1, depth, 1]
    hash_result = torch.vmap(
        hashing_function.accumulate_hash, in_dims=(None, 2), out_dims=2
    )(hash_result, keys)
    # hash_result shape [batch_size, num_ngrams, depth]
    return hash_result | 
	Computes random keys for each ngram and depth.
    Args:
      ngrams: Ngrams (batch_size, num_ngrams, ngram_len).
    Returns:
      ngram keys (batch_size, num_ngrams, depth).
     | 
	compute_ngram_keys | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
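A shape walk-through for compute_ngram_keys, again reusing the processor sketched above: sequences are windowed into overlapping ngrams with unfold before hashing.

input_ids = torch.randint(0, 256, (2, 12), device=device)   # (batch, input_len)
ngrams = input_ids.unfold(dimension=1, size=5, step=1)      # (2, 8, 5) for ngram_len=5
ngram_keys = logits_processor.compute_ngram_keys(ngrams)    # (2, 8, depth=4)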
	def _compute_keys(
      self,
      n_minus_1_grams: torch.LongTensor,
      indices: torch.LongTensor,
  ) -> tuple[torch.LongTensor, torch.LongTensor]:
    """Computes random keys for each ngram and depth.
    Args:
      n_minus_1_grams: Ngrams (batch_size, ngram_len - 1).
      indices: indices of the continuations (batch_size, num_indices)
    Returns:
      Ngram keys (batch_size, num_indices, depth).
    """
    batch_size, _ = n_minus_1_grams.shape
    hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)
    # First hash n_minus_1 gram, for each batch entry we have a single
    # n_minus_1 gram context.
    # hash_result shape [batch_size]
    # n_minus_1_gram shape [batch_size, ngram_len - 1]
    hash_result_with_just_context = hashing_function.accumulate_hash(
        hash_result, n_minus_1_grams
    )
    # hash_result shape [batch_size,]
    # Indices is of shape [batch_size, num_indices], so we make it
    # [batch_size, num_indices, 1] so we can vmap over num_indices dim.
    hash_result = torch.vmap(
        hashing_function.accumulate_hash, in_dims=(None, 1), out_dims=1
    )(hash_result_with_just_context, indices[:, :, None])
    # hash_result shape [batch_size, num_indices]
    # Basically we have a hash for each batch entry and each indices
    # Now we add watermarking keys to this hash.
    # keys are of shape [depth,]
    # We add batch, num_indices and data dimension to this making it
    # [1, 1, depth, 1].
    # So we can vmap over the depth dimension for compute_hash
    keys = self.keys[None, None, :, None]
    hash_result = torch.vmap(
        hashing_function.accumulate_hash, in_dims=(None, 2), out_dims=2
    )(hash_result, keys)
    # hash_result shape should be [batch_size, num_indices, depth]
    return hash_result, hash_result_with_just_context | 
	Computes random keys for each ngram and depth.
    Args:
      n_minus_1_grams: Ngrams (batch_size, ngram_len - 1).
      indices: indices of the continuations (batch_size, num_indices)
    Returns:
      Ngram keys (batch_size, num_indices, depth).
     | 
	_compute_keys | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
	def sample_g_values(self, ngram_keys: torch.LongTensor) -> torch.LongTensor:
    """Samples g values from Bernoulli distribution.
    It is not possible to pass random keys in a vectorized way in torch. Instead
    we pre-compute a random sampling table, and use apply modulo table size to
    map from ngram keys (int64) to g values.
    Args:
      ngram_keys: Random keys (batch_size, num_ngrams, depth).
    Returns:
      G values (batch_size, num_ngrams, depth).
    """
    (sampling_table_size,) = self.sampling_table.shape
    sampling_table = self.sampling_table.reshape((1, 1, sampling_table_size))
    ngram_keys = ngram_keys % sampling_table_size
    return torch.take_along_dim(sampling_table, indices=ngram_keys, dim=2) | 
	Samples g values from a Bernoulli distribution.
    It is not possible to pass random keys in a vectorized way in torch. Instead
    we pre-compute a random sampling table and apply modulo table size to
    map from ngram keys (int64) to g values.
    Args:
      ngram_keys: Random keys (batch_size, num_ngrams, depth).
    Returns:
      G values (batch_size, num_ngrams, depth).
     | 
	sample_g_values | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
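The table-lookup trick in sample_g_values, reproduced as a standalone sketch: int64 hash keys are reduced modulo the table size and used to index a precomputed 0/1 table.

import torch

table_size = 2**16
generator = torch.Generator().manual_seed(0)
sampling_table = torch.randint(0, 2, (table_size,), generator=generator)

ngram_keys = torch.randint(0, 2**62, (2, 8, 4))      # (batch, num_ngrams, depth)
g_values = sampling_table[ngram_keys % table_size]   # same shape as ngram_keys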
	def _check_input_ids_shape(self, input_ids: torch.LongTensor):
    """Checks the shape of input ids."""
    if len(input_ids.shape) != 2:
      raise ValueError(
          "Input ids should be of shape (batch_size, input_len), but is"
          f" {input_ids.shape}"
      ) | 
	Checks the shape of input ids. | 
	_check_input_ids_shape | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
	def compute_g_values(
      self,
      input_ids: torch.LongTensor,
  ) -> torch.LongTensor:
    """Computes g values for each ngram from the given sequence of tokens.
    Args:
      input_ids: Input token ids (batch_size, input_len).
    Returns:
      G values (batch_size, input_len - (ngram_len - 1), depth).
    """
    self._check_input_ids_shape(input_ids)
    ngrams = input_ids.unfold(dimension=1, size=self.ngram_len, step=1)
    ngram_keys = self.compute_ngram_keys(ngrams)
    return self.sample_g_values(ngram_keys) | 
	Computes g values for each ngram from the given sequence of tokens.
    Args:
      input_ids: Input token ids (batch_size, input_len).
    Returns:
      G values (batch_size, input_len - (ngram_len - 1), depth).
     | 
	compute_g_values | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
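Usage sketch for compute_g_values with the processor and input_ids from the earlier sketches; the per-depth mean of these g-values is the raw signal the detectors score.

g_values = logits_processor.compute_g_values(input_ids)  # (2, 8, depth)
mean_per_depth = g_values.float().mean(dim=(0, 1))       # higher => more watermark signal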
	def compute_context_repetition_mask(
      self,
      input_ids: torch.LongTensor,
  ) -> torch.LongTensor:
    """Computes repetition mask.
    0 and 1 stand for repeated and not repeated context n-1 grams respectively.
    Args:
      input_ids: Input token ids (batch_size, input_len).
    Returns:
      Repetitions mask (batch_size, input_len - (ngram_len - 1)).
    """
    self._check_input_ids_shape(input_ids)
    batch_size, _ = input_ids.shape
    state = SynthIDState(
        batch_size=batch_size,
        ngram_len=self.ngram_len,
        context_history_size=self.context_history_size,
        device=self.device,
    )
    contexts = input_ids[:, :-1].unfold(
        dimension=1,
        size=self.ngram_len - 1,
        step=1,
    )
    _, num_contexts, _ = contexts.shape
    are_repeated_contexts = []
    for i in range(num_contexts):
      context = contexts[:, i, :]
      hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)
      context_hash = hashing_function.accumulate_hash(hash_result, context)[
          :, None
      ]
      is_repeated_context = (state.context_history == context_hash).any(
          dim=1,
          keepdim=True,
      )
      are_repeated_contexts.append(is_repeated_context)
      state.context_history = torch.concat(
          (context_hash, state.context_history),
          dim=1,
      )[:, :-1]
    are_repeated_contexts = torch.concat(are_repeated_contexts, dim=1)
    return torch.logical_not(are_repeated_contexts) | 
	Computes repetition mask.
    0 and 1 stand for repeated and not repeated context n-1 grams respectively.
    Args:
      input_ids: Input token ids (batch_size, input_len).
    Returns:
      Repetitions mask (batch_size, input_len - (ngram_len - 1)).
     | 
	compute_context_repetition_mask | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
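A sketch of how this repetition mask is typically combined with g-values at detection time, so contexts that were skipped during generation do not dilute the score; shapes follow the two functions above.

context_mask = logits_processor.compute_context_repetition_mask(input_ids)
# g_values: (batch, num_positions, depth); context_mask: (batch, num_positions)
masked = g_values * context_mask[:, :, None]
masked_mean = masked.sum(dim=(0, 1)) / context_mask.sum()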
	def compute_eos_token_mask(
      self,
      input_ids: torch.LongTensor,
      eos_token_id: int,
  ) -> torch.LongTensor:
    """Computes repetitions mask.
    1 stands for ngrams that don't contain EOS tokens and vice versa.
    Args:
      input_ids: Input token ids (batch_size, input_len).
      eos_token_id: EOS token ID.
    Returns:
      EOS token mask (batch_size, input_len).
    """
    self._check_input_ids_shape(input_ids)
    noneos_masks = []
    all_eos_equated = input_ids == eos_token_id
    for eos_equated in all_eos_equated:
      nonzero_idx = torch.nonzero(eos_equated)
      noneos_mask = torch.ones_like(eos_equated)
      if nonzero_idx.shape[0] != 0:
        noneos_mask[nonzero_idx[0][0] :] = 0
      noneos_masks.append(noneos_mask)
    return torch.stack(noneos_masks, dim=0) | 
	Computes EOS token mask.
    1 stands for tokens before the first EOS token and 0 from the first EOS
    token onward.
    Args:
      input_ids: Input token ids (batch_size, input_len).
      eos_token_id: EOS token ID.
    Returns:
      EOS token mask (batch_size, input_len).
     | 
	compute_eos_token_mask | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing.py | 
	Apache-2.0 | 
| 
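The per-row loop above can equivalently be written as one cumulative product; a minimal sketch (the original also zeros the first EOS position itself, which cumprod reproduces).

import torch

def eos_mask(input_ids: torch.LongTensor, eos_token_id: int) -> torch.LongTensor:
    # 1 before the first EOS, 0 from the first EOS onward.
    return torch.cumprod((input_ids != eos_token_id).long(), dim=1)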
	def does_mean_g_value_matches_theoretical(
    vocab_size: int,
    ngram_len: int,
    batch_size: int,
    keys: Sequence[int],
    atol: float,
    device: torch.device,
    num_leaves: int = 2,
) -> tuple[float, float, bool]:
  """Tests that the mean g-value is close to theoretical value.
  SynthIDLogitsProcessor is tested on its own using random input tokens.
  Args:
    vocab_size: vocab size of the model.
    ngram_len: length of the ngram.
    batch_size: batch size of the model.
    keys: keys used for watermarking.
    atol: absolute tolerance for the mean g-value.
    device: device to use for the test.
    num_leaves: number of children per node in the tournament tree.
  Returns:
    A tuple of mean g-value, the expected mean g-value and the boolean result
    of the test.
  """
  generator = torch.Generator(device=device).manual_seed(0)
  # Use 10**9 rather than vocab_size to ensure variety in (n-1)-grams.
  context = torch.randint(
      low=0,
      high=10**9,
      size=(batch_size, ngram_len - 1),
      dtype=torch.int64,
      generator=generator,
      device=device,
  )
  context_history_size = 1024
  logits_processor = logits_processing.SynthIDLogitsProcessor(
      ngram_len=ngram_len,
      keys=keys,
      sampling_table_size=2**16,
      sampling_table_seed=0,
      context_history_size=context_history_size,
      device=device,
      top_k=vocab_size,
      temperature=0.7,
      num_leaves=num_leaves,
  )
  scores = torch.ones(
      (batch_size, vocab_size),
      dtype=torch.float64,
      device=device,
  )
  # Init state of the logits processor.
  logits_processor.watermarked_call(context, scores)
  # insert context into the state.
  for idx in range(1, ngram_len - 1):
    _ = logits_processor.watermarked_call(context[:, :idx], scores)
  updated_scores, indices_mapping, _ = logits_processor.watermarked_call(
      context, scores
  )
  probs = torch.nn.functional.softmax(updated_scores, dim=1)
  next_tokens = torch.multinomial(
      probs,
      num_samples=1,
      generator=generator,
  )
  # Re-map to dense indices with indices_mapping.
  next_tokens = torch.vmap(torch.take, in_dims=0, out_dims=0)(
      indices_mapping, next_tokens
  )
  ngrams = torch.concat((context, next_tokens), dim=1)
  g_values = logits_processor.compute_g_values(ngrams)
  mean_g_values = g_values.mean(dtype=torch.float64, dim=(0, 1))
  expected_mean_g_value = g_value_expectations.expected_mean_g_value(
      vocab_size=vocab_size, num_leaves=num_leaves
  )
  is_close = torch.all(
      torch.isclose(
          mean_g_values,
          torch.tensor(
              expected_mean_g_value, dtype=torch.float64, device=device
          ),
          atol=atol,
          rtol=0,
      )
  )
  return mean_g_values, expected_mean_g_value, is_close | 
	Tests that the mean g-value is close to the theoretical value.
  SynthIDLogitsProcessor is tested on its own using random input tokens.
  Args:
    vocab_size: vocab size of the model.
    ngram_len: length of the ngram.
    batch_size: batch size of the model.
    keys: keys used for watermarking.
    atol: absolute tolerance for the mean g-value.
    device: device to use for the test.
    num_leaves: number of children per node in the tournament tree.
  Returns:
    A tuple of mean g-value, the expected mean g-value and the boolean result
    of the test.
   | 
	does_mean_g_value_matches_theoretical | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing_test.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing_test.py | 
	Apache-2.0 | 
| 
	def test_distributional_convergence(self):
    """Check if watermarked distribution converges to input distribution."""
    vocab_size = 2
    batch_size = 1500
    num_keys = 1000
    device = torch_testing.torch_device()
    temperature = 1.0
    updated_softmaxes = 0
    for _ in tqdm.tqdm(range(num_keys)):
      watermarking_config = immutabledict.immutabledict({
          'ngram_len': 5,
          'keys': np.random.randint(0, 10**9, size=(1,), dtype=np.int64),
          'sampling_table_size': 2**16,
          'sampling_table_seed': 0,
          'context_history_size': 1024,
          'device': device,
      })
      logits_processor = logits_processing.SynthIDLogitsProcessor(
          **watermarking_config,
          top_k=vocab_size,
          temperature=temperature,
          apply_top_k=False,
      )
      ngrams = torch.randint(
          low=0,
          high=vocab_size,
          size=(batch_size, watermarking_config['ngram_len']),
          device=device,
      )
      # Insert the (ngram_len - 1)-gram context into the logits processor state.
      for idx in range(watermarking_config['ngram_len'] - 1):
        _ = logits_processor.watermarked_call(
            ngrams[:, :idx], torch.ones((batch_size, vocab_size), device=device)
        )
      scores = torch.ones((batch_size, vocab_size), device=device)
      updated_scores, _, _ = logits_processor.watermarked_call(ngrams, scores)
      updated_softmaxes += (
          torch.nn.functional.softmax(updated_scores, dim=1).cpu().numpy()
      )
    updated_softmaxes = np.mean(updated_softmaxes, axis=0) / num_keys
    for softmax in updated_softmaxes:
      self.assertAlmostEqual(softmax, 0.5, delta=0.002) | 
	Check if watermarked distribution converges to input distribution. | 
	test_distributional_convergence | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing_test.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing_test.py | 
	Apache-2.0 | 
| 
	def test_bias_from_logits_processor(
      self, vocab_size, ngram_len, num_layers, atol, num_leaves: int = 2,
  ):
    """Check if watermarked distribution converges to input distribution."""
    device = torch_testing.torch_device()
    mean, expected, passes = does_mean_g_value_matches_theoretical(
        vocab_size=vocab_size,
        ngram_len=ngram_len,
        batch_size=20_000,
        keys=[np.random.randint(0, 10**9) for _ in range(num_layers)],
        atol=atol,
        device=device,
        num_leaves=num_leaves,
    )
    self.assertTrue(passes) | 
	Check that the mean g-value matches the theoretical value. | 
	test_bias_from_logits_processor | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing_test.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing_test.py | 
	Apache-2.0 | 
| 
	def set_up_logits_processor(
      self,
      batch_size,
      sequence_len,
      num_layers,
      ngram_len,
      top_k,
      vocab_size,
  ):
    """Setup function for all the tests."""
    device = torch_testing.torch_device()
    watermarking_config = immutabledict.immutabledict({
        'ngram_len': ngram_len,
        'keys': np.random.randint(low=0, high=2**16, size=(num_layers,)),
        'sampling_table_size': 2**16,
        'sampling_table_seed': 0,
        'context_history_size': 512,
        'device': device,
    })
    logits_processor = logits_processing.SynthIDLogitsProcessor(
        **watermarking_config, top_k=top_k, temperature=1.0
    )
    sequences = torch.randint(
        low=0,
        high=vocab_size,
        size=(batch_size, sequence_len),
        device=device,
    )
    return logits_processor, sequences, device | 
	Setup function for all the tests. | 
	set_up_logits_processor | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/logits_processing_test.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/logits_processing_test.py | 
	Apache-2.0 | 
| 
	def _get_logits_warper(
      self,
      generation_config: transformers.GenerationConfig,
      **unused_kw,
  ) -> transformers.LogitsProcessorList:
    """Constructs and returns a list of warpers.
    This overrides the base class's implementation to control how we apply top_k
    and temperature. Only the SynthIDLogitsProcessor warper is constructed; it
    performs top_k and temperature scaling before applying the watermark. This
    reduces the latency impact of watermarking by only considering the top_k
    indices.
    Args:
     generation_config: Config used for generation with this model.
    Returns:
     List of logits processors to be applied at inference time.
    """
    extra_params = {}
    # Add temperature to extra params
    if not (
        generation_config.temperature is not None
        and 0.0 <= generation_config.temperature <= 1.0
    ):
      raise ValueError(
          f"Invalid temperature {generation_config.temperature} when sampling"
          " with watermarking. Temperature should be between 0.0 and 1.0."
      )
    extra_params["temperature"] = generation_config.temperature
    # Add top_k to extra params.
    if not (
        generation_config.top_k is not None and generation_config.top_k >= 1
    ):
      raise ValueError(
          f"Invalid top_k {generation_config.top_k} when sampling with"
          " watermarking. Top_k should >= 1."
      )
    extra_params["top_k"] = generation_config.top_k
    return self._construct_warper_list(extra_params) | 
	Constructs and returns a list of warpers.
    This overrides the base class's implementation to control how we apply top_k
    and temperature. Only the SynthIDLogitsProcessor warper is constructed; it
    performs top_k and temperature scaling before applying the watermark. This
    reduces the latency impact of watermarking by only considering the top_k
    indices.
    Args:
     generation_config: Config used for generation with this model.
    Returns:
     List of logits processors to be applied at inference time.
     | 
	_get_logits_warper | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/synthid_mixin.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/synthid_mixin.py | 
	Apache-2.0 | 
| 
	def _sample(
      self,
      input_ids: torch.LongTensor,
      logits_processor: transformers.LogitsProcessorList,
      stopping_criteria: transformers.StoppingCriteriaList,
      generation_config: transformers.GenerationConfig,
      synced_gpus: bool,
      streamer: Optional["transformers.BaseStreamer"],
      logits_warper: Optional[transformers.LogitsProcessorList] = None,
      **model_kwargs,
  ) -> Union[
      transformers.generation.utils.GenerateNonBeamOutput, torch.LongTensor
  ]:
    r"""Sample sequence of tokens.
    Generates sequences of token ids for models with a language modeling head
    using **multinomial sampling** and
    can be used for text-decoder, text-to-text, speech-to-text, and
    vision-to-text models.
    This function is copied and changed minimally from the HuggingFace
    repository to support watermarking implementation.
    This overrides the base class implementation to achieve watermarking of the
    logits before they are sampled. This is done specifically to preserve
    the top_k indices separately without making the logits dense with all the
    indices. This removes the extra overhead of considering all possible
    indices for watermarking.
    Args:
        input_ids: The sequence used as a prompt for the generation.
        logits_processor: List of instances of class derived from
          [`LogitsProcessor`] used to modify the prediction scores of the
          language modeling head applied at each generation step.
        stopping_criteria: An instance of [`StoppingCriteriaList`]. List of
          instances of class derived from [`StoppingCriteria`] used to tell if
          the generation loop should stop.
        generation_config: The generation configuration to be used as
          parametrization of the decoding method.
        synced_gpus: Whether to continue running the while loop until max_length
          (needed for ZeRO stage 3)
        streamer: Streamer object that will be used to stream the generated
          sequences. Generated tokens are passed through
          `streamer.put(token_ids)` and the streamer is responsible for any
          further processing.
        logits_warper: List of instances of class derived from [`LogitsWarper`]
          used to warp the prediction score distribution of the language
          modeling head applied before multinomial sampling at each generation
          step. Only required with sampling strategies (i.e. `do_sample` is set
          in `generation_config`)
        **model_kwargs: Additional model specific kwargs will be forwarded to
          the `forward` function of the model. If model is an encoder-decoder
          model the kwargs should include `encoder_outputs`.
    Returns:
        A `torch.LongTensor` containing the generated tokens (default behaviour)
        or a
        [`~generation.GenerateDecoderOnlyOutput`] if
        `model.config.is_encoder_decoder=False` and
        `return_dict_in_generate=True` or a
        [`~generation.GenerateEncoderDecoderOutput`] if
        `model.config.is_encoder_decoder=True`.
    """
    # init values
    pad_token_id = generation_config.pad_token_id
    output_attentions = generation_config.output_attentions
    output_hidden_states = generation_config.output_hidden_states
    output_scores = generation_config.output_scores
    output_logits = generation_config.output_logits
    return_dict_in_generate = generation_config.return_dict_in_generate
    has_eos_stopping_criteria = any(
        hasattr(criteria, "eos_token_id") for criteria in stopping_criteria
    )
    do_sample = generation_config.do_sample
    if do_sample and not isinstance(
        logits_warper, transformers.LogitsProcessorList
    ):
      raise ValueError(
          "`do_sample` is set to `True`, `logits_warper` must be a"
          f" `LogitsProcessorList` instance (it is {logits_warper})."
      )
    if has_eos_stopping_criteria and pad_token_id is None:
      raise ValueError(
          "`stopping_criteria` is not empty, `pad_token_id` must be set in "
          "`generation_config`. See "
          "https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig"
          "for more on how to configure the `pad_token_id`."
      )
    # init attention / hidden states / scores tuples
    scores = () if (return_dict_in_generate and output_scores) else None
    raw_logits = () if (return_dict_in_generate and output_logits) else None
    decoder_attentions = (
        () if (return_dict_in_generate and output_attentions) else None
    )
    cross_attentions = (
        () if (return_dict_in_generate and output_attentions) else None
    )
    decoder_hidden_states = (
        () if (return_dict_in_generate and output_hidden_states) else None
    )
    # if model is an encoder-decoder, retrieve encoder attention weights and
    # hidden states
    encoder_attentions = None
    encoder_hidden_states = None
    if return_dict_in_generate and self.config.is_encoder_decoder:  # pytype: disable=attribute-error
      encoder_attentions = (
          model_kwargs["encoder_outputs"].get("attentions")
          if output_attentions
          else None
      )
      encoder_hidden_states = (
          model_kwargs["encoder_outputs"].get("hidden_states")
          if output_hidden_states
          else None
      )
    # keep track of which sequences are already finished
    batch_size = input_ids.shape[0]
    this_peer_finished = False
    unfinished_sequences = torch.ones(
        batch_size, dtype=torch.long, device=input_ids.device
    )
    model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)  # pytype: disable=attribute-error
    while self._has_unfinished_sequences(  # pytype: disable=attribute-error
        this_peer_finished, synced_gpus, device=input_ids.device
    ):
      # prepare model inputs
      model_inputs = self.prepare_inputs_for_generation(  # pytype: disable=attribute-error
          input_ids, **model_kwargs
      )
      # forward pass to get next token
      outputs = self(  # pytype: disable=not-callable
          **model_inputs,
          return_dict=True,
          output_attentions=output_attentions,
          output_hidden_states=output_hidden_states,
      )
      if synced_gpus and this_peer_finished:
        continue  # don't waste resources running the code we don't need
      # Clone is needed to avoid keeping a hanging ref to outputs.logits which
      # may be very large for first iteration (the clone itself is always small)
      next_token_logits = outputs.logits[:, -1, :].clone()
      # pre-process distribution
      next_token_scores = logits_processor(input_ids, next_token_logits)
      indices_mapping = None
      unwatermarked_scores = None
      if do_sample:
        *regular_warpers, watermarking_logits_warper = logits_warper
        if not isinstance(
            watermarking_logits_warper,
            logits_processing.SynthIDLogitsProcessor,
        ):
          raise ValueError(
              "SynthIDLogitsProcessor should be the final warper in the list"
              " while watermarking."
          )
        for logit_warper in regular_warpers:
          next_token_scores = logit_warper(input_ids, next_token_scores)
        # Watermark final scores with sparse top_k.
        next_token_scores, indices_mapping, unwatermarked_scores = (
            watermarking_logits_warper.watermarked_call(
                input_ids, next_token_scores
            )
        )
      # token selection
      if do_sample:
        probs = torch.nn.functional.softmax(next_token_scores, dim=-1)
        next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
      else:
        next_tokens = torch.argmax(next_token_scores, dim=-1)
      # Store scores, attentions and hidden_states when required
      if return_dict_in_generate:
        if output_scores:
          assert unwatermarked_scores is not None
          score = torch.gather(
              -torch.log(torch.nn.Softmax(dim=1)(unwatermarked_scores)),
              1,
              next_tokens[:, None],
          )
          scores += (score,)
        if output_logits:
          raw_logits += (next_token_logits,)
        if output_attentions:
          decoder_attentions += (
              (outputs.decoder_attentions,)
              if self.config.is_encoder_decoder  # pytype: disable=attribute-error
              else (outputs.attentions,)
          )
          if self.config.is_encoder_decoder:  # pytype: disable=attribute-error
            cross_attentions += (outputs.cross_attentions,)
        if output_hidden_states:
          decoder_hidden_states += (
              (outputs.decoder_hidden_states,)
              if self.config.is_encoder_decoder  # pytype: disable=attribute-error
              else (outputs.hidden_states,)
          )
      assert indices_mapping is not None
      # re-mapping to dense indices with indices_mapping
      next_tokens = torch.vmap(torch.take, in_dims=0, out_dims=0)(
          indices_mapping, next_tokens
      )
      # finished sentences should have their next token be a padding token
      if has_eos_stopping_criteria:
        next_tokens = next_tokens * unfinished_sequences + pad_token_id * (
            1 - unfinished_sequences
        )
      # update generated ids, model inputs, and length for next step
      input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
      if streamer is not None:
        streamer.put(next_tokens.cpu())
      model_kwargs = self._update_model_kwargs_for_generation(  # pytype: disable=attribute-error
          outputs,
          model_kwargs,
          is_encoder_decoder=self.config.is_encoder_decoder,  # pytype: disable=attribute-error
      )
      unfinished_sequences = unfinished_sequences & ~stopping_criteria(
          input_ids, scores
      )
      this_peer_finished = unfinished_sequences.max() == 0
      # This is needed to properly delete outputs.logits which may be very large
      # for first iteration. Otherwise a reference to outputs is kept which
      # keeps the logits alive in the next iteration
      del outputs
    if streamer is not None:
      streamer.end()
    if return_dict_in_generate:
      if self.config.is_encoder_decoder:  # pytype: disable=attribute-error
        return transformers.generation.utils.GenerateEncoderDecoderOutput(
            sequences=input_ids,
            scores=scores,
            logits=raw_logits,
            encoder_attentions=encoder_attentions,
            encoder_hidden_states=encoder_hidden_states,
            decoder_attentions=decoder_attentions,
            cross_attentions=cross_attentions,
            decoder_hidden_states=decoder_hidden_states,
            past_key_values=model_kwargs.get("past_key_values"),
        )
      else:
        return transformers.generation.utils.GenerateDecoderOnlyOutput(
            sequences=input_ids,
            scores=scores,
            logits=raw_logits,
            attentions=decoder_attentions,
            hidden_states=decoder_hidden_states,
            past_key_values=model_kwargs.get("past_key_values"),
        )
    else:
      return input_ids | 
	Sample sequence of tokens.
    Generates sequences of token ids for models with a language modeling head
    using **multinomial sampling** and
    can be used for text-decoder, text-to-text, speech-to-text, and
    vision-to-text models.
    This function is copied and changed minimally from the HuggingFace
    repository to support watermarking implementation.
    This overrides the base class implementation to achieve watermarking of the
    logits before they are sampled. This is done specifically to preserve
    the top_k indices separately without making the logits dense with all the
    indices. This removes the extra overhead of considering all possible
    indices for watermarking.
    Args:
        input_ids: The sequence used as a prompt for the generation.
        logits_processor: List of instances of class derived from
          [`LogitsProcessor`] used to modify the prediction scores of the
          language modeling head applied at each generation step.
        stopping_criteria: An instance of [`StoppingCriteriaList`]. List of
          instances of class derived from [`StoppingCriteria`] used to tell if
          the generation loop should stop.
        generation_config: The generation configuration to be used as
          parametrization of the decoding method.
        synced_gpus: Whether to continue running the while loop until max_length
          (needed for ZeRO stage 3)
        streamer: Streamer object that will be used to stream the generated
          sequences. Generated tokens are passed through
          `streamer.put(token_ids)` and the streamer is responsible for any
          further processing.
        logits_warper: List of instances of class derived from [`LogitsWarper`]
          used to warp the prediction score distribution of the language
          modeling head applied before multinomial sampling at each generation
          step. Only required with sampling strategies (i.e. `do_sample` is set
          in `generation_config`)
        **model_kwargs: Additional model specific kwargs will be forwarded to
          the `forward` function of the model. If model is an encoder-decoder
          model the kwargs should include `encoder_outputs`.
    Returns:
        A `torch.LongTensor` containing the generated tokens (default behaviour)
        or a
        [`~generation.GenerateDecoderOnlyOutput`] if
        `model.config.is_encoder_decoder=False` and
        `return_dict_in_generate=True` or a
        [`~generation.GenerateEncoderDecoderOutput`] if
        `model.config.is_encoder_decoder=True`.
     | 
	_sample | 
	python | 
	google-deepmind/synthid-text | 
	src/synthid_text/synthid_mixin.py | 
	https://github.com/google-deepmind/synthid-text/blob/master/src/synthid_text/synthid_mixin.py | 
	Apache-2.0 | 
| 
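How the overridden _sample above is reached in practice: through a model class that mixes in this behavior and a plain generate call. The class name below is illustrative, not necessarily the one defined in synthid_mixin.py.

import transformers

# Hypothetical mixin-derived class; see synthid_mixin.py for the real ones.
model = SynthIDGemmaForCausalLM.from_pretrained("google/gemma-2b")
tokenizer = transformers.AutoTokenizer.from_pretrained("google/gemma-2b")
inputs = tokenizer("The quick brown fox", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,       # watermarking requires sampling
    top_k=40,             # _get_logits_warper requires top_k >= 1
    temperature=0.7,      # and temperature in (0.0, 1.0]
    max_new_tokens=64,
)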
	def get_root():
    """Get the project root directory.
    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        pass
    return root | 
	Get the project root directory.
    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
     | 
	get_root | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    parser = configparser.SafeConfigParser()
    with open(setup_cfg, "r") as f:
        parser.readfp(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory
    def get(parser, name):
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg | 
	Read the project setup.cfg file to determine Versioneer config. | 
	get_config_from_root | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate | 
	Decorator to mark a method as the handler for a particular VCS. | 
	register_vcs_handler | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
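Usage sketch for the decorator above, matching how versioneer registers its git handlers.

@register_vcs_handler("git", "get_keywords")
def my_git_get_keywords(versionfile_abs):
    ...
# HANDLERS["git"]["get_keywords"] now points at my_git_get_keywords.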
	def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        pass
    return keywords | 
	Extract version information from the given file. | 
	git_get_keywords | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None} | 
	Get version information from git keywords. | 
	git_versions_from_keywords | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces | 
	Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
     | 
	git_pieces_from_vcs | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
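A worked sketch of the TAG-NUM-gHEX[-dirty] parse performed above, on a sample describe string.

import re

describe_out = "v1.2.3-4-gdeadbee-dirty"
dirty = describe_out.endswith("-dirty")
if dirty:
    describe_out = describe_out[:describe_out.rindex("-dirty")]
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', describe_out)
# mo.group(1) == "v1.2.3", int(mo.group(2)) == 4, mo.group(3) == "deadbee"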
	def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.
    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        f = open(".gitattributes", "r")
        for line in f.readlines():
            if line.strip().startswith(versionfile_source):
                if "export-subst" in line.strip().split()[1:]:
                    present = True
        f.close()
    except EnvironmentError:
        pass
    if not present:
        f = open(".gitattributes", "a+")
        f.write("%s export-subst\n" % versionfile_source)
        f.close()
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files) | 
	Git-specific installation logic for Versioneer.
    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.
     | 
	do_vcs_install | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    rootdirs = []
    for i in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        else:
            rootdirs.append(root)
            root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix") | 
	Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
     | 
	versions_from_parentdir | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
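A worked sketch of the parent-directory heuristic above: an unpacked source tarball directory whose name carries the version. The prefix and path are illustrative.

import os

parentdir_prefix = "python-language-server-"   # illustrative prefix
root = "/tmp/python-language-server-0.31.9"    # unpacked tarball directory
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
    version = dirname[len(parentdir_prefix):]  # "0.31.9"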
	def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    mo = re.search(r"version_json = '''\n(.*)'''  # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        mo = re.search(r"version_json = '''\r\n(.*)'''  # END VERSION_JSON",
                       contents, re.M | re.S)
    if not mo:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(mo.group(1)) | 
	Try to determine the version from _version.py if present. | 
	versions_from_file | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
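
For illustration, the shape of a rewritten _version.py that versions_from_file can parse (the JSON payload here is made up):

import json
import re

contents = """version_json = '''
{"version": "1.2.3", "full-revisionid": "abc1234", "dirty": false,
 "error": null, "date": null}
'''  # END VERSION_JSON
"""
mo = re.search(r"version_json = '''\n(.*)'''  # END VERSION_JSON",
               contents, re.M | re.S)
print(json.loads(mo.group(1))["version"])  # -> 1.2.3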
| 
	def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    os.unlink(filename)
    contents = json.dumps(versions, sort_keys=True,
                          indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % contents)
    print("set %s to '%s'" % (filename, versions["version"])) | 
	Write the given version number to the given _version.py file. | 
	write_to_version_file | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+" | 
	Return a + if we don't already have one, else return a . | 
	plus_or_dot | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered | 
	Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
     | 
	render_pep440 | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
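
A worked example of the rendering described above, with a made-up pieces dict for a checkout two commits past tag 1.4 that has local modifications:

pieces = {"closest-tag": "1.4", "distance": 2, "short": "abc1234",
          "dirty": True, "long": "abc1234" + "0" * 33, "error": None,
          "date": None}
# render_pep440(pieces) walks the branches above and produces:
#   "1.4" + "+" + "2.gabc1234" + ".dirty"  ->  "1.4+2.gabc1234.dirty"
# With no tag at all it falls into exception #1:
#   "0+untagged.2.gabc1234.dirty"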
| 
	def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered | 
	TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
     | 
	render_pep440_pre | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered | 
	TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
     | 
	render_pep440_post | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered | 
	TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
     | 
	render_pep440_old | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered | 
	TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
     | 
	render_git_describe | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered | 
	TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
     | 
	render_git_describe_long | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")} | 
	Render the given version pieces into the requested style. | 
	render | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
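
For the same hypothetical pieces dict (tag 1.4, two commits ahead, dirty, short hash abc1234), the styles dispatched above render as:

#   pep440            -> "1.4+2.gabc1234.dirty"
#   pep440-pre        -> "1.4.post.dev2"            (dirtiness is dropped)
#   pep440-post       -> "1.4.post2.dev0+gabc1234"
#   pep440-old        -> "1.4.post2.dev0"
#   git-describe      -> "1.4-2-gabc1234-dirty"
#   git-describe-long -> "1.4-2-gabc1234-dirty"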
| 
	def get_versions(verbose=False):
    """Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None} | 
	Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.
     | 
	get_versions | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
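
Schematically, get_versions tries its sources in this order (the pyls-style file names are illustrative; the actual path comes from cfg.versionfile_source), and only on total failure returns the sentinel dict:

#   1. git-archive keywords expanded in pyls/_version.py   ('git archive' tarball)
#   2. version_json already written into pyls/_version.py  (sdist / installed copy)
#   3. pieces from 'git describe', rendered via cfg.style  (developer checkout)
#   4. parent directory name such as pyls-1.2.3/           (plain tarball)
fallback = {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}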
| 
	def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            pass
        def finalize_options(self):
            pass
        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...
        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]
    if 'py2exe' in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2
        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds | 
	Get the custom setuptools/distutils subclasses used by Versioneer. | 
	get_cmdclass | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
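
The usual setup.py wiring for these command classes, matching the shape that scan_setup_py checks for further below (the project name is hypothetical):

import versioneer
from setuptools import setup

setup(
    name="example-project",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
)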
| 
	def do_setup():
    """Main VCS-independent setup function for installing Versioneer."""
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0 | 
	Main VCS-independent setup function for installing Versioneer. | 
	do_setup | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
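
As a sketch, after do_setup() succeeds on a project configured like pyls (versionfile_source = pyls/_version.py), the working tree is expected to gain:

#   MANIFEST.in      include versioneer.py
#                    include pyls/_version.py
#   .gitattributes   pyls/_version.py export-subst
#   pyls/__init__.py the INIT_PY_SNIPPET version boilerplate, appended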
| 
	def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as f:
        for line in f.readlines():
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print("        cmdclass=versioneer.get_cmdclass(),  ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors | 
	Validate the contents of setup.py against Versioneer's expectations. | 
	scan_setup_py | 
	python | 
	palantir/python-language-server | 
	versioneer.py | 
	https://github.com/palantir/python-language-server/blob/master/versioneer.py | 
	MIT | 
| 
	def pyls_commands(config, workspace):
    """The list of command strings supported by the server.
    Returns:
        List[str]: The supported commands.
    """ | 
	The list of command strings supported by the server.
    Returns:
        List[str]: The supported commands.
     | 
	pyls_commands | 
	python | 
	palantir/python-language-server | 
	pyls/hookspecs.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/hookspecs.py | 
	MIT | 
| 
	def __getitem__(self, item):
        """Override getitem to fallback through multiple dispatchers."""
        if self._shutdown and item != 'exit':
            # exit is the only allowed method during shutdown
            log.debug("Ignoring non-exit method during shutdown: %s", item)
            raise KeyError
        try:
            return super(PythonLanguageServer, self).__getitem__(item)
        except KeyError:
            # Fallback through extra dispatchers
            for dispatcher in self._dispatchers:
                try:
                    return dispatcher[item]
                except KeyError:
                    continue
        raise KeyError() | 
	Override getitem to fallback through multiple dispatchers. | 
	__getitem__ | 
	python | 
	palantir/python-language-server | 
	pyls/python_ls.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/python_ls.py | 
	MIT | 
| 
	def _hook(self, hook_name, doc_uri=None, **kwargs):
        """Calls hook_name and returns a list of results from all registered handlers"""
        workspace = self._match_uri_to_workspace(doc_uri)
        doc = workspace.get_document(doc_uri) if doc_uri else None
        hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
        return hook_handlers(config=self.config, workspace=workspace, document=doc, **kwargs) | 
	Calls hook_name and returns a list of results from all registered handlers | 
	_hook | 
	python | 
	palantir/python-language-server | 
	pyls/python_ls.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/python_ls.py | 
	MIT | 
| 
	def urlparse(uri):
    """Parse and decode the parts of a URI."""
    scheme, netloc, path, params, query, fragment = parse.urlparse(uri)
    return (
        parse.unquote(scheme),
        parse.unquote(netloc),
        parse.unquote(path),
        parse.unquote(params),
        parse.unquote(query),
        parse.unquote(fragment)
    ) | 
	Parse and decode the parts of a URI. | 
	urlparse | 
	python | 
	palantir/python-language-server | 
	pyls/uris.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/uris.py | 
	MIT | 
| 
	def urlunparse(parts):
    """Unparse and encode parts of a URI."""
    scheme, netloc, path, params, query, fragment = parts
    # Avoid encoding the windows drive letter colon
    if RE_DRIVE_LETTER_PATH.match(path):
        quoted_path = path[:3] + parse.quote(path[3:])
    else:
        quoted_path = parse.quote(path)
    return parse.urlunparse((
        parse.quote(scheme),
        parse.quote(netloc),
        quoted_path,
        parse.quote(params),
        parse.quote(query),
        parse.quote(fragment)
    )) | 
	Unparse and encode parts of a URI. | 
	urlunparse | 
	python | 
	palantir/python-language-server | 
	pyls/uris.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/uris.py | 
	MIT | 
| 
	def to_fs_path(uri):
    """Returns the filesystem path of the given URI.
    Will handle UNC paths and normalize windows drive letters to lower-case. Also
    uses the platform specific path separator. Will *not* validate the path for
    invalid characters and semantics. Will *not* look at the scheme of this URI.
    """
    # scheme://netloc/path;parameters?query#fragment
    scheme, netloc, path, _params, _query, _fragment = urlparse(uri)
    if netloc and path and scheme == 'file':
        # unc path: file://shares/c$/far/boo
        value = "//{}{}".format(netloc, path)
    elif RE_DRIVE_LETTER_PATH.match(path):
        # windows drive letter: file:///C:/far/boo
        value = path[1].lower() + path[2:]
    else:
        # Other path
        value = path
    if IS_WIN:
        value = value.replace('/', '\\')
    return value | 
	Returns the filesystem path of the given URI.
    Will handle UNC paths and normalize windows drive letters to lower-case. Also
    uses the platform specific path separator. Will *not* validate the path for
    invalid characters and semantics. Will *not* look at the scheme of this URI.
     | 
	to_fs_path | 
	python | 
	palantir/python-language-server | 
	pyls/uris.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/uris.py | 
	MIT | 
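
A few illustrative conversions (the drive-letter lowering and backslash swap only apply when IS_WIN is true):

to_fs_path('file:///C:/far/boo')        # -> 'c:\\far\\boo' on Windows
to_fs_path('file://shares/c$/far/boo')  # -> '//shares/c$/far/boo' elsewhere
to_fs_path('file:///home/user/x.py')    # -> '/home/user/x.py'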
| 
	def from_fs_path(path):
    """Returns a URI for the given filesystem path."""
    scheme = 'file'
    params, query, fragment = '', '', ''
    path, netloc = _normalize_win_path(path)
    return urlunparse((scheme, netloc, path, params, query, fragment)) | 
	Returns a URI for the given filesystem path. | 
	from_fs_path | 
	python | 
	palantir/python-language-server | 
	pyls/uris.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/uris.py | 
	MIT | 
| 
	def uri_with(uri, scheme=None, netloc=None, path=None, params=None, query=None, fragment=None):
    """Return a URI with the given part(s) replaced.
    Parts are decoded / encoded.
    """
    old_scheme, old_netloc, old_path, old_params, old_query, old_fragment = urlparse(uri)
    path, _netloc = _normalize_win_path(path)
    return urlunparse((
        scheme or old_scheme,
        netloc or old_netloc,
        path or old_path,
        params or old_params,
        query or old_query,
        fragment or old_fragment
    )) | 
	Return a URI with the given part(s) replaced.
    Parts are decoded / encoded.
     | 
	uri_with | 
	python | 
	palantir/python-language-server | 
	pyls/uris.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/uris.py | 
	MIT | 
| 
	def lock(method):
    """Define an atomic region over a method."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        with self._lock:
            return method(self, *args, **kwargs)
    return wrapper | 
	Define an atomic region over a method. | 
	lock | 
	python | 
	palantir/python-language-server | 
	pyls/workspace.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/workspace.py | 
	MIT | 
| 
	def source_roots(self, document_path):
        """Return the source roots for the given document."""
        files = _utils.find_parents(self._root_path, document_path, ['setup.py', 'pyproject.toml']) or []
        return list({os.path.dirname(project_file) for project_file in files}) or [self._root_path] | 
	Return the source roots for the given document. | 
	source_roots | 
	python | 
	palantir/python-language-server | 
	pyls/workspace.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/workspace.py | 
	MIT | 
| 
	def word_at_position(self, position):
        """Get the word under the cursor returning the start and end positions."""
        if position['line'] >= len(self.lines):
            return ''
        line = self.lines[position['line']]
        i = position['character']
        # Split word in two
        start = line[:i]
        end = line[i:]
        # Take end of start and start of end to find word
        # These are guaranteed to match, even if they match the empty string
        m_start = RE_START_WORD.findall(start)
        m_end = RE_END_WORD.findall(end)
        return m_start[0] + m_end[-1] | 
	Get the word under the cursor, returned as a single string. | 
	word_at_position | 
	python | 
	palantir/python-language-server | 
	pyls/workspace.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/workspace.py | 
	MIT | 
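
A self-contained sketch of the split-and-rejoin trick, assuming RE_START_WORD and RE_END_WORD are the usual identifier regexes (their definitions are not shown here, so the patterns below are assumptions):

import re

RE_START_WORD = re.compile('[A-Za-z_0-9]*$')  # assumed definition
RE_END_WORD = re.compile('^[A-Za-z_0-9]*')    # assumed definition

line = "self.workspace.get_document(uri)"
i = 7                                         # cursor inside "workspace"
start, end = line[:i], line[i:]               # "self.wo" / "rkspace.get_..."
word = RE_START_WORD.findall(start)[0] + RE_END_WORD.findall(end)[-1]
print(word)                                   # -> workspace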
| 
	def debounce(interval_s, keyed_by=None):
    """Debounce calls to this function until interval_s seconds have passed."""
    def wrapper(func):
        timers = {}
        lock = threading.Lock()
        @functools.wraps(func)
        def debounced(*args, **kwargs):
            call_args = inspect.getcallargs(func, *args, **kwargs)
            key = call_args[keyed_by] if keyed_by else None
            def run():
                with lock:
                    del timers[key]
                return func(*args, **kwargs)
            with lock:
                old_timer = timers.get(key)
                if old_timer:
                    old_timer.cancel()
                timer = threading.Timer(interval_s, run)
                timers[key] = timer
                timer.start()
        return debounced
    return wrapper | 
	Debounce calls to this function until interval_s seconds have passed. | 
	debounce | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
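
Example usage of the decorator above (lint and its URI are hypothetical); each call resets the per-key timer, so a burst of edits triggers a single run:

import time

@debounce(0.5, keyed_by='doc_uri')
def lint(doc_uri):
    print("linting", doc_uri)

for _ in range(5):
    lint(doc_uri='file:///tmp/a.py')  # each call cancels the previous timer
time.sleep(1)                         # exactly one "linting ..." is printed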
| 
	def find_parents(root, path, names):
    """Find files matching the given names relative to the given path.
    Args:
        path (str): The file path to start searching up from.
        names (List[str]): The file/directory names to look for.
        root (str): The directory at which to stop recursing upwards.
    Note:
        The path MUST be within the root.
    """
    if not root:
        return []
    if not os.path.commonprefix((root, path)):
        log.warning("Path %s not in %s", path, root)
        return []
    # Split the relative by directory, generate all the parent directories, then check each of them.
    # This avoids running a loop that has different base-cases for unix/windows
    # e.g. /a/b and /a/b/c/d/e.py -> ['/a/b', 'c', 'd']
    dirs = [root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep)
    # Search each of /a/b/c, /a/b, /a
    while dirs:
        search_dir = os.path.join(*dirs)
        existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))
        if existing:
            return existing
        dirs.pop()
    # Otherwise nothing
    return [] | 
	Find files matching the given names relative to the given path.
    Args:
        path (str): The file path to start searching up from.
        names (List[str]): The file/directory names to look for.
        root (str): The directory at which to stop recursing upwards.
    Note:
        The path MUST be within the root.
     | 
	find_parents | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
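
A worked trace under a hypothetical tree where /repo/setup.py exists and the document is /repo/pkg/mod.py:

find_parents('/repo', '/repo/pkg/mod.py', ['setup.py', 'pyproject.toml'])
# dirs == ['/repo', 'pkg']: /repo/pkg/setup.py does not exist, so the
# loop pops back to /repo and returns ['/repo/setup.py']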
| 
	def path_to_dot_name(path):
    """Given a path to a module, derive its dot-separated full name."""
    directory = os.path.dirname(path)
    module_name, _ = os.path.splitext(os.path.basename(path))
    full_name = [module_name]
    while os.path.exists(os.path.join(directory, '__init__.py')):
        this_directory = os.path.basename(directory)
        directory = os.path.dirname(directory)
        full_name = [this_directory] + full_name
    return '.'.join(full_name) | 
	Given a path to a module, derive its dot-separated full name. | 
	path_to_dot_name | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
| 
	def merge_dicts(dict_a, dict_b):
    """Recursively merge dictionary b into dictionary a.
    Values of None in dict_b never override existing values in dict_a.
    """
    def _merge_dicts_(a, b):
        for key in set(a.keys()).union(b.keys()):
            if key in a and key in b:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    yield (key, dict(_merge_dicts_(a[key], b[key])))
                elif b[key] is not None:
                    yield (key, b[key])
                else:
                    yield (key, a[key])
            elif key in a:
                yield (key, a[key])
            elif b[key] is not None:
                yield (key, b[key])
    return dict(_merge_dicts_(dict_a, dict_b)) | 
	Recursively merge dictionary b into dictionary a.
    Values of None in dict_b never override existing values in dict_a.
     | 
	merge_dicts | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
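
A worked example with a pyls-style configuration dict; note how the None in dict_b leaves dict_a's value intact:

a = {"plugins": {"pycodestyle": {"enabled": True, "maxLineLength": 79}}}
b = {"plugins": {"pycodestyle": {"enabled": None, "maxLineLength": 120}}}
merge_dicts(a, b)
# -> {"plugins": {"pycodestyle": {"enabled": True, "maxLineLength": 120}}}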
| 
	def format_docstring(contents):
    """Python doc strings come in a number of formats, but LSP wants markdown.
    Until we can find a fast enough way of discovering and parsing each format,
    we can do a little better by at least preserving indentation.
    """
    contents = contents.replace('\t', u'\u00A0' * 4)
    contents = contents.replace('  ', u'\u00A0' * 2)
    return contents | 
	Python doc strings come in a number of formats, but LSP wants markdown.
    Until we can find a fast enough way of discovering and parsing each format,
    we can do a little better by at least preserving indentation.
     | 
	format_docstring | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
| 
	def clip_column(column, lines, line_number):
    """
    Normalise the column as per the LSP spec, which accepts character positions greater than the line length.
    https://microsoft.github.io/language-server-protocol/specification#position
    """
    max_column = len(lines[line_number].rstrip('\r\n')) if len(lines) > line_number else 0
    return min(column, max_column) | 
	
    Normalise the column as per the LSP spec, which accepts character positions greater than the line length.
    https://microsoft.github.io/language-server-protocol/specification#position
     | 
	clip_column | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
| 
	def position_to_jedi_linecolumn(document, position):
    """
    Convert the LSP format 'line', 'character' to Jedi's 'line', 'column'
    https://microsoft.github.io/language-server-protocol/specification#position
    """
    code_position = {}
    if position:
        code_position = {'line': position['line'] + 1,
                         'column': clip_column(position['character'],
                                               document.lines,
                                               position['line'])}
    return code_position | 
	
    Convert the LSP format 'line', 'character' to Jedi's 'line', 'column'
    https://microsoft.github.io/language-server-protocol/specification#position
     | 
	position_to_jedi_linecolumn | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
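
A worked example: LSP positions are zero-based while Jedi's lines are one-based, and an out-of-range character is clamped first:

# With a first line of 10 characters and position {'line': 0, 'character': 99}:
#   clip_column(99, document.lines, 0)               -> 10
#   position_to_jedi_linecolumn(document, position)  -> {'line': 1, 'column': 10}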
| 
	def is_process_alive(pid):
        """Check whether the process with the given pid is still alive.
        Running `os.kill()` on Windows always terminates the target process, so it can't be used to check whether a process is alive.
        see: https://docs.python.org/3/library/os.html?highlight=os%20kill#os.kill
        Hence ctypes is used to query the process directly via the Windows API, avoiding any third-party dependency.
        Args:
            pid (int): process ID
        Returns:
            bool: False if the process is not alive or we don't have permission to check, True otherwise.
        """
        # A nonzero handle from OpenProcess means the process exists; the
        # handle is closed again immediately since only liveness is checked.
        process = kernel32.OpenProcess(PROCESS_QUERY_INFROMATION, 0, pid)
        if process != 0:
            kernel32.CloseHandle(process)
            return True
        return False | 
	Check whether the process with the given pid is still alive.
        Running `os.kill()` on Windows always terminates the target process, so it can't be used to check whether a process is alive.
        see: https://docs.python.org/3/library/os.html?highlight=os%20kill#os.kill
        Hence ctypes is used to query the process directly via the Windows API, avoiding any third-party dependency.
        Args:
            pid (int): process ID
        Returns:
            bool: False if the process is not alive or we don't have permission to check, True otherwise.
         | 
	is_process_alive | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
| 
	def is_process_alive(pid):
        """Check whether the process with the given pid is still alive.
        Args:
            pid (int): process ID
        Returns:
            bool: True if the process is alive (even if we lack permission to signal it), False otherwise.
        """
        if pid < 0:
            return False
        try:
            os.kill(pid, 0)
        except OSError as e:
            return e.errno == errno.EPERM
        else:
            return True | 
	Check whether the process with the given pid is still alive.
        Args:
            pid (int): process ID
        Returns:
            bool: True if the process is alive (even if we lack permission to signal it), False otherwise.
         | 
	is_process_alive | 
	python | 
	palantir/python-language-server | 
	pyls/_utils.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_utils.py | 
	MIT | 
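
Quick illustration of the POSIX variant above; os.kill(pid, 0) delivers no signal but still performs the existence and permission checks:

import os

print(is_process_alive(os.getpid()))  # -> True: the current process exists
print(is_process_alive(-1))           # -> False: negative pids are rejected
# An OSError of ESRCH means "no such process"; EPERM means the process
# exists but belongs to another user, which still counts as alive.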
| 
	def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords | 
	Get the keywords needed to look up the version information. | 
	get_keywords | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = ""
    cfg.parentdir_prefix = ""
    cfg.versionfile_source = "pyls/_version.py"
    cfg.verbose = False
    return cfg | 
	Create, populate and return the VersioneerConfig() object. | 
	get_config | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate | 
	Decorator to mark a method as the handler for a particular VCS. | 
	register_vcs_handler | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
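
A sketch of the registration pattern (the demo handler and the empty HANDLERS dict are illustrative; the real module defines HANDLERS at import time):

HANDLERS = {}

@register_vcs_handler("git", "keywords")
def demo_keywords_handler(keywords, tag_prefix, verbose):
    """Hypothetical handler used only to show the registration."""
    return {}

assert HANDLERS["git"]["keywords"] is demo_keywords_handler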
| 
	def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    rootdirs = []
    for i in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        else:
            rootdirs.append(root)
            root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix") | 
	Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
     | 
	versions_from_parentdir | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        pass
    return keywords | 
	Extract version information from the given file. | 
	git_get_keywords | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None} | 
	Get version information from git keywords. | 
	git_versions_from_keywords | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
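
For illustration, the expanded keywords a 'git archive' tarball might carry, and what the parsing above extracts from them (the values are made up):

keywords = {"refnames": " (HEAD -> master, tag: 1.4, origin/master)",
            "full": "abc1234" + "0" * 33,
            "date": "2020-01-01 12:00:00 +0000"}
# The "tag: " entries survive the filtering, so with tag_prefix "" the
# result is version "1.4"; the date is massaged into ISO-8601 form:
#   "2020-01-01 12:00:00 +0000" -> "2020-01-01T12:00:00+0000"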
| 
	def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces | 
	Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
     | 
	git_pieces_from_vcs | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
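
A self-contained sketch of parsing the three 'git describe' output shapes the function distinguishes (the sample strings are made up):

import re

for describe_out in ("1.4-2-gabc1234", "1.4-2-gabc1234-dirty", "abc1234"):
    dirty = describe_out.endswith("-dirty")
    desc = describe_out[:-len("-dirty")] if dirty else describe_out
    mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', desc)
    if mo:  # TAG-NUM-gHEX
        print(mo.group(1), int(mo.group(2)), mo.group(3), dirty)
    else:   # bare HEX: no tag; distance comes from 'git rev-list --count'
        print(None, desc, dirty)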
| 
	def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+" | 
	Return a + if we don't already have one, else return a . | 
	plus_or_dot | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered | 
	Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
     | 
	render_pep440 | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered | 
	TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
     | 
	render_pep440_pre | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered | 
	TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
     | 
	render_pep440_post | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
| 
	def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered | 
	TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
     | 
	render_pep440_old | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
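The old style is the post style minus the +gHEX local segment; with the same illustrative values:

	render_pep440_old({"closest-tag": "1.2.3", "distance": 4, "dirty": False})  # '1.2.3.post4'
	render_pep440_old({"closest-tag": "1.2.3", "distance": 4, "dirty": True})   # '1.2.3.post4.dev0'
	render_pep440_old({"closest-tag": None, "distance": 4, "dirty": True})      # '0.post4.dev0'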
| 
	def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered | 
	TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
     | 
	render_git_describe | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
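With illustrative values, this mirrors what git describe itself would print:

	pieces = {"closest-tag": "1.2.3", "distance": 4, "dirty": True, "short": "abc1234"}
	render_git_describe(pieces)  # '1.2.3-4-gabc1234-dirty'

	# Exception #1: no tags, so only the bare short hash (no 'g' prefix) remains.
	render_git_describe({"closest-tag": None, "distance": 4, "dirty": True,
	                     "short": "abc1234"})  # 'abc1234-dirty'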
| 
	def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered | 
	TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
     | 
	render_git_describe_long | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
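The only behavioral difference from the short form is at distance 0, where the distance and hash are still emitted; an illustrative check:

	render_git_describe_long({"closest-tag": "1.2.3", "distance": 0,
	                          "dirty": False, "short": "abc1234"})  # '1.2.3-0-gabc1234'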
| 
	def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")} | 
	Render the given version pieces into the requested style. | 
	render | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
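A sketch of the dispatcher's contract with illustrative pieces (the 40-character long id is fabricated): it always returns a dict with the same five keys, and only an unknown style raises:

	pieces = {"closest-tag": "1.2.3", "distance": 4, "dirty": False,
	          "short": "abc1234", "long": "abc1234" + "0" * 33,
	          "error": None, "date": "2019-01-01T00:00:00+0000"}
	render(pieces, "git-describe")
	# {'version': '1.2.3-4-gabc1234', 'full-revisionid': 'abc1234' + 33 zeros,
	#  'dirty': False, 'error': None, 'date': '2019-01-01T00:00:00+0000'}
	render(pieces, "nonsense")  # raises ValueError: unknown style 'nonsense'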
| 
	def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None} | 
	Get version information or return default if unable to do so. | 
	get_versions | 
	python | 
	palantir/python-language-server | 
	pyls/_version.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/_version.py | 
	MIT | 
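From a caller's perspective, the fallback chain means get_versions() returns a dict rather than raising; a minimal usage sketch:

	info = get_versions()
	if info["error"]:
	    # e.g. "unable to find root of source tree" or "unable to compute version"
	    print("version lookup failed:", info["error"])
	else:
	    print(info["version"], info["full-revisionid"], info["dirty"])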
| 
	def _binary_stdio():
    """Construct binary stdio streams (not text mode).
    This seems to be different for Windows/Unix and Python 2/3, so going by:
        https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
    """
    PY3K = sys.version_info >= (3, 0)
    if PY3K:
        # pylint: disable=no-member
        stdin, stdout = sys.stdin.buffer, sys.stdout.buffer
    else:
        # Python 2 on Windows opens sys.stdin in text mode, and
        # binary data read from it becomes corrupted on \r\n
        if sys.platform == "win32":
            # set sys.stdin to binary mode
            # pylint: disable=no-member,import-error
            import os
            import msvcrt
            msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        stdin, stdout = sys.stdin, sys.stdout
    return stdin, stdout | 
	Construct binary stdio streams (not text mode).
    This seems to be different for Windows/Unix and Python 2/3, so going by:
        https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
     | 
	_binary_stdio | 
	python | 
	palantir/python-language-server | 
	pyls/__main__.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/__main__.py | 
	MIT | 
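A minimal Python 3 usage sketch; the Content-Length framing in the comments is how LSP messages are delimited on the wire, shown here only to motivate why the streams must be binary (the actual framing lives elsewhere in the server):

	stdin, stdout = _binary_stdio()
	header = stdin.readline()  # bytes such as b'Content-Length: 123\r\n'
	stdout.write(b"Content-Length: 2\r\n\r\n{}")
	stdout.flush()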
| 
	def settings(self, document_path=None):
        """Settings are constructed from a few sources:
            1. User settings, found in user's home directory
            2. Plugin settings, reported by PyLS plugins
            3. LSP settings, given to us from didChangeConfiguration
            4. Project settings, found in config files in the current project.
        Since this function is nondeterministic, it is important to call
        settings.cache_clear() when the config is updated
        """
        settings = {}
        sources = self._settings.get('configurationSources', DEFAULT_CONFIG_SOURCES)
        # Plugin configuration
        settings = _utils.merge_dicts(settings, self._plugin_settings)
        # LSP configuration
        settings = _utils.merge_dicts(settings, self._settings)
        # User configuration
        for source_name in reversed(sources):
            source = self._config_sources.get(source_name)
            if not source:
                continue
            source_conf = source.user_config()
            log.debug("Got user config from %s: %s", source.__class__.__name__, source_conf)
            settings = _utils.merge_dicts(settings, source_conf)
        # Project configuration
        for source_name in reversed(sources):
            source = self._config_sources.get(source_name)
            if not source:
                continue
            source_conf = source.project_config(document_path or self._root_path)
            log.debug("Got project config from %s: %s", source.__class__.__name__, source_conf)
            settings = _utils.merge_dicts(settings, source_conf)
        log.debug("With configuration: %s", settings)
        return settings | 
	Settings are constructed from a few sources:
            1. User settings, found in user's home directory
            2. Plugin settings, reported by PyLS plugins
            3. LSP settings, given to us from didChangeConfiguration
            4. Project settings, found in config files in the current project.
        Since this function is nondeterministic, it is important to call
        settings.cache_clear() when the config is updated
         | 
	settings | 
	python | 
	palantir/python-language-server | 
	pyls/config/config.py | 
	https://github.com/palantir/python-language-server/blob/master/pyls/config/config.py | 
	MIT | 
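A sketch of the precedence this produces, using a simplified recursive merge as a stand-in for _utils.merge_dicts (the real helper may differ in detail); each later merge wins, so project settings override user settings, which override LSP and plugin settings:

	def merge_dicts(a, b):
	    # Recursively merge b into a copy of a; values from b take priority.
	    out = dict(a)
	    for key, val in b.items():
	        if isinstance(val, dict) and isinstance(out.get(key), dict):
	            out[key] = merge_dicts(out[key], val)
	        else:
	            out[key] = val
	    return out

	plugin = {"plugins": {"pycodestyle": {"enabled": True, "maxLineLength": 79}}}
	lsp = {"plugins": {"pycodestyle": {"maxLineLength": 100}}}
	project = {"plugins": {"pycodestyle": {"enabled": False}}}

	settings = {}
	for layer in (plugin, lsp, project):  # lowest to highest priority
	    settings = merge_dicts(settings, layer)
	# {'plugins': {'pycodestyle': {'enabled': False, 'maxLineLength': 100}}}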