Columns:
repo_name: string (length 2 to 55)
dataset: string (1 class)
owner: string (length 3 to 31)
lang: string (10 classes)
func_name: string (length 1 to 104)
code: string (length 20 to 96.7k)
docstring: string (length 1 to 4.92k)
url: string (length 94 to 241)
sha: string (length 40)
OpenComplex
github_2023
baaihealth
python
between_residue_clash_loss
def between_residue_clash_loss( atom23_pred_positions: torch.Tensor, atom23_atom_exists: torch.Tensor, atom23_atom_radius: torch.Tensor, residue_index: torch.Tensor, overlap_tolerance_soft=1.5, overlap_tolerance_hard=1.5, eps=1e-10, ) -> Dict[str, torch.Tensor]: """Loss to penalize steric clashes between residues. This is a loss penalizing any steric clashes due to non-bonded atoms in different peptides coming too close. This loss corresponds to the part with different residues of Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46. Args: atom23_pred_positions: Predicted positions of atoms in global prediction frame atom23_atom_exists: Mask denoting whether atom at positions exists for given amino acid type atom23_atom_radius: Van der Waals radius for each atom. residue_index: Residue index for given amino acid. overlap_tolerance_soft: Soft tolerance factor. overlap_tolerance_hard: Hard tolerance factor. Returns: Dict containing: * 'mean_loss': average clash loss * 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 23) * 'per_atom_clash_mask': mask whether atom clashes with any other atom shape (N, 23) """ fp_type = atom23_pred_positions.dtype # Create the distance matrix. # (N, N, 23, 23) dists = torch.sqrt( eps + torch.sum( ( atom23_pred_positions[..., :, None, :, None, :] - atom23_pred_positions[..., None, :, None, :, :] ) ** 2, dim=-1, ) ) # Create the mask for valid distances. # shape (N, N, 23, 23) dists_mask = ( atom23_atom_exists[..., :, None, :, None] * atom23_atom_exists[..., None, :, None, :] ).type(fp_type) # Mask out all the duplicate entries in the lower triangular matrix. # Also mask out the diagonal (atom-pairs from the same residue) -- these atoms # are handled separately. dists_mask = dists_mask * ( residue_index[..., :, None, None, None] < residue_index[..., None, :, None, None] ) # Backbone O3'--P bond between subsequent residues is no clash. o3_one_hot = torch.nn.functional.one_hot( residue_index.new_tensor(6), num_classes=23 ) o3_one_hot = o3_one_hot.reshape( *((1,) * len(residue_index.shape[:-1])), *o3_one_hot.shape ) o3_one_hot = o3_one_hot.type(fp_type) p_one_hot = torch.nn.functional.one_hot( residue_index.new_tensor(8), num_classes=23 ) p_one_hot = p_one_hot.reshape( *((1,) * len(residue_index.shape[:-1])), *p_one_hot.shape ) p_one_hot = p_one_hot.type(fp_type) neighbour_mask = ( residue_index[..., :, None, None, None] + 1 ) == residue_index[..., None, :, None, None] o3_p_bonds = ( neighbour_mask * o3_one_hot[..., None, None, :, None] * p_one_hot[..., None, None, None, :] ) dists_mask = dists_mask * (1.0 - o3_p_bonds) # Compute the lower bound for the allowed distances. # shape (N, N, 23, 23) dists_lower_bound = dists_mask * ( atom23_atom_radius[..., :, None, :, None] + atom23_atom_radius[..., None, :, None, :] ) # Compute the error. # shape (N, N, 23, 23) dists_to_low_error = dists_mask * torch.nn.functional.relu( dists_lower_bound - overlap_tolerance_soft - dists ) # Compute the mean loss. # shape () mean_loss = torch.sum(dists_to_low_error) / (1e-6 + torch.sum(dists_mask)) # Compute the per atom loss sum. # shape (N, 23) per_atom_loss_sum = torch.sum(dists_to_low_error, dim=(-4, -2)) + torch.sum( dists_to_low_error, axis=(-3, -1) ) # Compute the hard clash mask. # shape (N, N, 23, 23) clash_mask = dists_mask * ( dists < (dists_lower_bound - overlap_tolerance_hard) ) # Compute the per atom clash. # shape (N, 23) per_atom_clash_mask = torch.maximum( torch.amax(clash_mask, axis=(-4, -2)), torch.amax(clash_mask, axis=(-3, -1)), ) return { "mean_loss": mean_loss, # shape () "per_atom_loss_sum": per_atom_loss_sum, # shape (N, 23) "per_atom_clash_mask": per_atom_clash_mask, # shape (N, 23) }
"""Loss to penalize steric clashes between residues. This is a loss penalizing any steric clashes due to non bonded atoms in different peptides coming too close. This loss corresponds to the part with different residues of Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46. Args: atom14_pred_positions: Predicted positions of atoms in global prediction frame atom14_atom_exists: Mask denoting whether atom at positions exists for given amino acid type atom14_atom_radius: Van der Waals radius for each atom. residue_index: Residue index for given amino acid. overlap_tolerance_soft: Soft tolerance factor. overlap_tolerance_hard: Hard tolerance factor. Returns: Dict containing: * 'mean_loss': average clash loss * 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 23) * 'per_atom_clash_mask': mask whether atom clashes with any other atom shape (N, 23) """
https://github.com/baaihealth/OpenComplex/blob/ce0d5b97b154e992b7abb10403b2ad49f850ea6f/opencomplex/loss/loss_fns_rna.py#L180-L310
ce0d5b97b154e992b7abb10403b2ad49f850ea6f
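In compact form, the soft clash penalty this function computes (Jumper et al. 2021, Suppl. eq. 46, restricted to atom pairs from different residues) is, writing $d_{ij}$ for the predicted distance between atoms $i$ and $j$, $r_i$ for their Van der Waals radii, $\tau$ for the soft overlap tolerance, and $m_{ij}$ for the validity mask built in the code:

$$ \mathcal{L}_{\text{clash}} = \frac{\sum_{ij} m_{ij}\,\max\bigl(0,\; r_i + r_j - \tau - d_{ij}\bigr)}{10^{-6} + \sum_{ij} m_{ij}} $$

The hard clash mask flags pairs with $d_{ij} < r_i + r_j - \tau_{\text{hard}}$, and the per-atom loss sums this error over all partners of each atom.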
opshin
github_2023
OpShin
python
apply_parameter
def apply_parameter(self, *args: pycardano.Datum): """ Returns a new OpShin Contract with the applied parameters """ # update the parameters in the blueprint (remove applied parameters) assert len(self.parameter_types) >= len( args ), f"Applying too many parameters to contract, allowed amount: {len(self.parameter_types)}, but got {len(args)}" new_parameter_types = copy.copy(self.parameter_types) for _ in args: # TODO validate that the applied parameters are of the correct type new_parameter_types.pop(0) new_contract_contract = apply_parameters(self.contract, *args) new_contract = PlutusContract( new_contract_contract, self.datum_type, self.redeemer_type, new_parameter_types, self.purpose, self.version, self.title, self.description, ) return new_contract
""" Returns a new OpShin Contract with the applied parameters """
https://github.com/OpShin/opshin/blob/d657a227f02670e6b6eed9cac77c0f8a25d51423/opshin/builder.py#L146-L169
d657a227f02670e6b6eed9cac77c0f8a25d51423
NeMo-Framework-Launcher
github_2023
NVIDIA
python
LM.loglikelihood_rolling
@abc.abstractmethod def loglikelihood_rolling(self, requests): """Compute full log-likelihood of a string, with no truncation, for perplexity computation - We will use the full max context length of the model. - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to the max context length. - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations which may simply concatenate multiple documents together. - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into multiple chunks, the last input will still have a full-sized context. Example: Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ] Prefix: EOT Max context length: 4 Resulting input/prediction pairs: INPUT: EOT 0 1 2 PRED: 0 1 2 3 INPUT: 3 4 5 6 PRED: 4 5 6 7 INPUT: 5 6 7 8 PRED: 8 9 Observe that: 1. Each token is predicted exactly once 2. For the last pair, we provide the full context, but only score the last two tokens :param requests: list A list of strings string: str String for which we are computing per-token loglikelihood :return: list A list of pairs (logprob, isgreedy) logprob: float The log probability of `continuation` isgreedy: Whether `continuation` would be generated by greedy sampling from `context` """ pass
"""Compute full log-likelihood of a string, with no truncation, for perplexity computation - We will use the full max context length of the model. - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to the max context length. - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations which may simply concatenate multiple documents together. - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into multiple chunks, the last input will still a full-sized context. Example: Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ] Prefix: EOT Max context length: 4 Resulting input/prediction pairs: INPUT: EOT 0 1 2 PRED: 0 1 2 3 INPUT: 3 4 5 6 PRED: 4 5 6 7 INPUT: 5 6 7 8 PRED: 8 9 Observe that: 1. Each token is predicted exactly once 2. For the last pair, we provide the full context, but only score the last two tokens :param requests: list A list of strings string: str String for which we are computing per-toke loglikelihood :return: list A list of pairs (logprob, isgreedy) logprob: float The log probability of `continuation` isgreedy: Whether `continuation` would be generated by greedy sampling from `context` """
https://github.com/NVIDIA/NeMo-Framework-Launcher/blob/4abd481402adc8a061942c486eda6d71f19de718/launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/base.py#L64-L104
4abd481402adc8a061942c486eda6d71f19de718
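The rolling chunking described in that docstring can be sketched in a few lines of plain Python. The helper below is illustrative only (it is not part of the NeMo code); it reproduces the input/prediction pairs from the docstring example, predicting every token exactly once while giving the final chunk a full-sized context:

```python
def rolling_windows(tokens, max_len, prefix_token="EOT"):
    """Illustrative sketch: split `tokens` into (input, prediction) pairs."""
    pairs = []
    pred_start = 0  # index of the first token not yet scored
    while pred_start < len(tokens):
        pred_end = min(pred_start + max_len, len(tokens))
        # The model input is the max_len tokens preceding the last predicted
        # position; the prefix token stands in for the position before index 0.
        ctx = tokens[max(0, pred_end - max_len - 1):pred_end - 1]
        if pred_end - max_len - 1 < 0:
            ctx = [prefix_token] + ctx
        # Only the tokens not covered by an earlier window are scored.
        pairs.append((ctx, tokens[pred_start:pred_end]))
        pred_start = pred_end
    return pairs

# rolling_windows(list(range(10)), max_len=4) ->
#   (['EOT', 0, 1, 2], [0, 1, 2, 3]),
#   ([3, 4, 5, 6],     [4, 5, 6, 7]),
#   ([5, 6, 7, 8],     [8, 9])
```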
kaizenflow
github_2023
causify-ai
python
Backtest_TestCase._test
@abc.abstractmethod def _test(self, *args: Any, **kwargs: Any) -> None: """ Run the entire flow. """
""" Run the entire flow. """
https://github.com/causify-ai/kaizenflow/blob/545f66ef6e6b0e5109602dbf1938ef668c55750d/dataflow/backtest/backtest_test_case.py#L123-L127
545f66ef6e6b0e5109602dbf1938ef668c55750d
kaizenflow
github_2023
causify-ai
python
get_forecast_evaluator
def get_forecast_evaluator( forecast_evaluator_class_name: str, **kwargs: Dict[str, Any] ) -> dtfmabfoev.AbstractForecastEvaluator: """ Get the forecast evaluator for the backtest analysis. :param forecast_evaluator_class_name: name of the ForecastEvaluator as str, e.g. "ForecastEvaluatorFromPrices", "ForecastEvaluatorWithOptimizer" :param **kwargs: kwargs for ctor of the provided ForecastEvaluator class :return: ForecastEvaluator object """ # Choose the class based on the label. if forecast_evaluator_class_name == "ForecastEvaluatorFromPrices": forecast_evaluator_class = dtfmfefrpr.ForecastEvaluatorFromPrices # elif forecast_evaluator_class_name == "ForecastEvaluatorWithOptimizer": forecast_evaluator_class = ofevwiop.ForecastEvaluatorWithOptimizer # else: raise ValueError( f"Unsupported forecast_evaluator_class_name: {forecast_evaluator_class_name}" ) # Construct the object. forecast_evaluator = forecast_evaluator_class(**kwargs) return forecast_evaluator
""" Get the forecast evaluator for the backtest analysis. :param forecast_evaluator_class_name: name of the ForecastEvaluator as str, e.g. "ForecastEvaluatorFromPrices", "ForecastEvaluatorWithOptimizer" :param **kwargs: kwargs for ctor of the provided ForecastEvaluator class :return: ForecastEvaluator object """
https://github.com/causify-ai/kaizenflow/blob/545f66ef6e6b0e5109602dbf1938ef668c55750d/dataflow/model/backtest_notebook_utils.py#L138-L163
545f66ef6e6b0e5109602dbf1938ef668c55750d
kaizenflow
github_2023
causify-ai
python
to_typed_csv
def to_typed_csv(df: pd.DataFrame, file_name: str) -> str: """ Convert df into CSV and creates a file with the dtypes of columns. This function creates a file containing the types with the same name and suffix e.g., `foobar.csv.types`. """ # Save the types. dtypes_filename = file_name + ".types" hio.create_enclosing_dir(dtypes_filename, incremental=True) dtypes_dict = str(df.dtypes.apply(lambda x: x.name).to_dict()) # Save the data. df.to_csv(file_name, index=False) with open(dtypes_filename, "w") as dtypes_file: dtypes_file.write(dtypes_dict) return dtypes_filename
""" Convert df into CSV and creates a file with the dtypes of columns. This function creates a file containing the types with the same name and suffix e.g., `foobar.csv.types`. """
https://github.com/causify-ai/kaizenflow/blob/545f66ef6e6b0e5109602dbf1938ef668c55750d/helpers/hcsv.py#L350-L365
545f66ef6e6b0e5109602dbf1938ef668c55750d
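A reader that undoes this is the natural counterpart. The sketch below is only an illustration of how the `.types` file written above could be consumed to restore column dtypes; the name `from_typed_csv` and its exact behavior are assumptions, not necessarily what the `hcsv` module provides:

```python
import ast

import pandas as pd


def from_typed_csv(file_name: str) -> pd.DataFrame:
    """Illustrative sketch: load a CSV written by `to_typed_csv()` and
    re-apply the dtypes stored in the companion `.types` file."""
    with open(file_name + ".types") as dtypes_file:
        # The file holds the repr of a dict, e.g. "{'price': 'float64'}".
        dtypes_dict = ast.literal_eval(dtypes_file.read())
    df = pd.read_csv(file_name)
    return df.astype(dtypes_dict)
```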
kaizenflow
github_2023
causify-ai
python
purify_from_environment
def purify_from_environment(txt: str) -> str: """ Replace environment variables with placeholders. The performed transformations are: 1. Replace the Git path with `$GIT_ROOT` 2. Replace the path of current working dir with `$PWD` 3. Replace the current user name with `$USER_NAME` """ # 1) Remove references to Git modules starting from the innermost one. # Make sure that the path is not followed by a word character. # E.g., `/app/test.txt` is the correct path, while `/application.py` # is not a root path even though `/app` is the part of the text. dir_pattern = r"(?![\w])" for super_module in [False, True]: # Replace the git path with `$GIT_ROOT`. super_module_path = hgit.get_client_root(super_module=super_module) if super_module_path != "/": pattern = re.compile(f"{super_module_path}{dir_pattern}") txt = pattern.sub("$GIT_ROOT", txt) else: # If the git path is `/` then we don't need to do anything. pass # 2) Replace the path of current working dir with `$PWD` pwd = os.getcwd() pattern = re.compile(f"{pwd}{dir_pattern}") txt = pattern.sub("$PWD", txt) # 3) Replace the current user name with `$USER_NAME`. user_name = hsystem.get_user_name() # Set a regex pattern that finds a user name surrounded by dot, dash or space. # E.g., `IMAGE=$CK_ECR_BASE_PATH/amp_test:local-$USER_NAME-1.0.0`, # `--name $USER_NAME.amp_test.app.app`, `run --rm -l user=$USER_NAME`. pattern = rf"([\s\n\-\.\=]|^)+{user_name}+([.\s/-]|$)" # Use `\1` and `\2` to preserve specific characters around `$USER_NAME`. target = r"\1$USER_NAME\2" txt = re.sub(pattern, target, txt) _LOG.debug("After %s: txt='\n%s'", hintros.get_function_name(), txt) return txt
""" Replace environment variables with placeholders. The performed transformations are: 1. Replace the Git path with `$GIT_ROOT` 2. Replace the path of current working dir with `$PWD` 3. Replace the current user name with `$USER_NAME` """
https://github.com/causify-ai/kaizenflow/blob/545f66ef6e6b0e5109602dbf1938ef668c55750d/helpers/hunit_test.py#L372-L409
545f66ef6e6b0e5109602dbf1938ef668c55750d
kaizenflow
github_2023
causify-ai
python
_get_docker_compose_cmd
def _get_docker_compose_cmd( base_image: str, stage: str, version: str, cmd: str, *, # TODO(gp): make these params mandatory. extra_env_vars: Optional[List[str]] = None, extra_docker_compose_files: Optional[List[str]] = None, extra_docker_run_opts: Optional[List[str]] = None, service_name: str = "app", entrypoint: bool = True, generate_docker_compose_file: bool = True, as_user: bool = True, print_docker_config: bool = False, use_bash: bool = False, ) -> str: """ Get `docker-compose` run command. E.g., ``` IMAGE=*****..dkr.ecr.us-east-1.amazonaws.com/amp:dev \ docker-compose \ --file /amp/devops/compose/docker-compose.yml \ --env-file devops/env/default.env \ run \ --rm \ --name grisha.cmamp.app.cmamp1.20220317_232120 \ --user $(id -u):$(id -g) \ app \ bash ``` :param cmd: command to run inside Docker container :param extra_docker_run_opts: additional `docker-compose` run options :param service_name: service to use to run a command :param entrypoint: whether to use the `entrypoint` or not :param generate_docker_compose_file: generate the Docker compose file or not :param as_user: pass the user / group id or not :param print_docker_config: print the docker config for debugging purposes :param use_bash: run command through a shell """ hprint.log( _LOG, logging.DEBUG, "cmd extra_docker_run_opts service_name " "entrypoint as_user print_docker_config use_bash", ) # - Get the base Docker command. docker_cmd_ = _get_docker_base_cmd( base_image, stage, version, service_name, generate_docker_compose_file, extra_env_vars, extra_docker_compose_files, ) # - Add the `config` command for debugging purposes. docker_config_cmd: List[str] = docker_cmd_[:] docker_config_cmd.append( r""" config""" ) # - Add the `run` command. docker_cmd_.append( r""" run \ --rm""" ) # - Add a name to the container. container_name = _get_container_name(service_name) docker_cmd_.append( rf""" --name {container_name}""" ) # - Handle the user. as_user = _run_docker_as_user(as_user) if as_user: docker_cmd_.append( r""" --user $(id -u):$(id -g)""" ) # - Handle the extra docker options. if extra_docker_run_opts: hdbg.dassert_isinstance(extra_docker_run_opts, list) extra_opts = " ".join(extra_docker_run_opts) docker_cmd_.append( rf""" {extra_opts}""" ) # - Handle entrypoint. if entrypoint: docker_cmd_.append( rf""" {service_name}""" ) if cmd: if use_bash: cmd = f"bash -c '{cmd}'" docker_cmd_.append( rf""" {cmd}""" ) else: # No entrypoint. docker_cmd_.append( rf""" --entrypoint bash \ {service_name}""" ) # Print the config for debugging purpose. if print_docker_config: docker_config_cmd_as_str = hlitauti.to_multi_line_cmd(docker_config_cmd) _LOG.debug("docker_config_cmd=\n%s", docker_config_cmd_as_str) _LOG.debug( "docker_config=\n%s", hsystem.system_to_string(docker_config_cmd_as_str)[1], ) # Print the config for debugging purpose. docker_cmd_: str = hlitauti.to_multi_line_cmd(docker_cmd_) return docker_cmd_
""" Get `docker-compose` run command. E.g., ``` IMAGE=*****..dkr.ecr.us-east-1.amazonaws.com/amp:dev \ docker-compose \ --file /amp/devops/compose/docker-compose.yml \ --env-file devops/env/default.env \ run \ --rm \ --name grisha.cmamp.app.cmamp1.20220317_232120 \ --user $(id -u):$(id -g) \ app \ bash ``` :param cmd: command to run inside Docker container :param extra_docker_run_opts: additional `docker-compose` run options :param service_name: service to use to run a command :param entrypoint: whether to use the `entrypoint` or not :param generate_docker_compose_file: generate the Docker compose file or not :param as_user: pass the user / group id or not :param print_docker_config: print the docker config for debugging purposes :param use_bash: run command through a shell """
https://github.com/causify-ai/kaizenflow/blob/545f66ef6e6b0e5109602dbf1938ef668c55750d/helpers/lib_tasks_docker.py#L1133-L1254
545f66ef6e6b0e5109602dbf1938ef668c55750d
kaizenflow
github_2023
causify-ai
python
_apply_trimming
def _apply_trimming(self, df: pd.DataFrame) -> pd.DataFrame: """ Trim `df` according to ATH, weekends, missing data. :param df: as in `compute_portfolio()` :return: `df` trimmed down to: - required and possibly optional columns - "active" bars (bars where at least one instrument has an end-of-bar price) - first index with both a returns prediction and a volatility """ _LOG.debug("df.shape=%s", str(df.shape)) # Restrict to required columns. cols = [self._price_col, self._volatility_col, self._prediction_col] df = df[cols] active_index = cofinanc.infer_active_bars(df[self._price_col]) # Drop rows with no prices (this is an approximate way to handle weekends, # market holidays, and shortened trading sessions). df = df.reindex(index=active_index) _LOG.debug("after active_index: df.shape=%s", df.shape) # Drop indices with prices that precede any returns prediction or # volatility computation. first_valid_prediction_index = df[ self._prediction_col ].first_valid_index() hdbg.dassert_is_not(first_valid_prediction_index, None) _LOG.debug(hprint.to_str("first_valid_prediction_index")) # first_valid_volatility_index = df[ self._volatility_col ].first_valid_index() hdbg.dassert_is_not(first_valid_volatility_index, None) _LOG.debug(hprint.to_str("first_valid_volatility_index")) # first_valid_index = max( first_valid_prediction_index, first_valid_volatility_index ) df = df.loc[first_valid_index:] _LOG.debug("df.shape=%s", str(df.shape)) _LOG.debug("trimmed df=\n%s", hpandas.df_to_str(df)) return df
""" Trim `df` according to ATH, weekends, missing data. :param df: as in `compute_portfolio()` :return: `df` trimmed down to: - required and possibly optional columns - "active" bars (bars where at least one instrument has an end-of-bar price) - first index with both a returns prediction and a volatility """
https://github.com/causify-ai/kaizenflow/blob/545f66ef6e6b0e5109602dbf1938ef668c55750d/optimizer/forecast_evaluator_with_optimizer.py#L285-L325
545f66ef6e6b0e5109602dbf1938ef668c55750d
kaizenflow
github_2023
causify-ai
python
get_github_create_issues_table_query
def get_github_create_issues_table_query() -> str: """ Get SQL query to create github_issues table. This table contains the data as it is downloaded. """ query = """ CREATE TABLE IF NOT EXISTS github_issues( id SERIAL PRIMARY KEY, number NUMERIC, title VARCHAR(500) NOT NULL, created_at TIMESTAMP, updated_at TIMESTAMP, closed_at TIMESTAMP, author_association VARCHAR(255), comments NUMERIC, body VARCHAR(50000) , user_login VARCHAR(255) NOT NULL, user_id NUMERIC, Crypto_Name VARCHAR(255) NOT NULL, Extension VARCHAR(255) NOT NULL ) """ return query
""" Get SQL query to create github_issues table. This table contains the data as it is downloaded. """
https://github.com/causify-ai/kaizenflow/blob/545f66ef6e6b0e5109602dbf1938ef668c55750d/sorrentum_sandbox/spring2023/ml_projects/SorrIssue21_Team2_Implement_sandbox_for_GitHub_2/db_team2.py#L52-L76
545f66ef6e6b0e5109602dbf1938ef668c55750d
gyre
github_2023
stablecabal
python
sample_dpmpp_2m
@torch.no_grad() def sample_dpmpp_2m( model, x, sigmas, extra_args=None, callback=None, disable=None, warmup_lms=False, ddim_cutoff=0.0, ): """DPM-Solver++(2M).""" extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) sigma_fn = lambda t: t.neg().exp() t_fn = lambda sigma: sigma.log().neg() old_denoised = None for i in trange(len(sigmas) - 1, disable=disable): denoised = model(x, sigmas[i] * s_in, **extra_args) if callback is not None: callback( { "x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised, } ) t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1]) h = t_next - t if old_denoised is None and warmup_lms: r = 1 / 2 s = t + r * h x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised denoised_i = model(x_2, sigma_fn(s) * s_in, **extra_args) elif sigmas[i + 1] <= ddim_cutoff or old_denoised is None: denoised_i = denoised else: h_last = t - t_fn(sigmas[i - 1]) r = h_last / h denoised_i = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_i old_denoised = denoised return x
"""DPM-Solver++(2M)."""
https://github.com/stablecabal/gyre/blob/9cba9781cd458acb8b821f5dc584299cab1ed2f3/gyre/pipeline/schedulers/sample_dpmpp_2m.py#L5-L50
9cba9781cd458acb8b821f5dc584299cab1ed2f3
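For reference, the multistep branch of the loop above implements the DPM-Solver++(2M) data-prediction update. With $t_i = -\log \sigma_i$, $h_i = t_{i+1} - t_i$, $r_i = h_{i-1}/h_i$ and the model outputs $D_i$ (`denoised`), the code computes

$$ \tilde{D}_i = \Bigl(1 + \tfrac{1}{2 r_i}\Bigr) D_i - \tfrac{1}{2 r_i} D_{i-1}, \qquad x_{i+1} = \frac{\sigma_{i+1}}{\sigma_i}\, x_i - \bigl(e^{-h_i} - 1\bigr)\, \tilde{D}_i, $$

falling back to $\tilde{D}_i = D_i$ (a DDIM-like step) on the first step or once $\sigma_{i+1}$ drops below `ddim_cutoff`, and optionally replacing the first step's $\tilde{D}_i$ with an extra mid-interval model evaluation when `warmup_lms` is set.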
BigWorld-Engine-14.4.1
github_2023
v2v3v4
python
Select.froms
@property def froms(self): """Return the displayed list of FromClause elements.""" return self._get_display_froms()
"""Return the displayed list of FromClause elements."""
https://github.com/v2v3v4/BigWorld-Engine-14.4.1/blob/4389085c8ce35cff887a4cc18fc47d1133d89ffb/programming/bigworld/build/bw_internal/perf/sqlalchemy/sql/expression.py#L4842-L4846
4389085c8ce35cff887a4cc18fc47d1133d89ffb
BigWorld-Engine-14.4.1
github_2023
v2v3v4
python
Complex.__truediv__
@abstractmethod def __truediv__(self, other): """self / other with __future__ division. Should promote to float when necessary. """ raise NotImplementedError
"""self / other with __future__ division. Should promote to float when necessary. """
https://github.com/v2v3v4/BigWorld-Engine-14.4.1/blob/4389085c8ce35cff887a4cc18fc47d1133d89ffb/programming/bigworld/third_party/python/Lib/numbers.py#L123-L129
4389085c8ce35cff887a4cc18fc47d1133d89ffb
BigWorld-Engine-14.4.1
github_2023
v2v3v4
python
is_namespace
def is_namespace(self): """Returns true if name binding introduces new namespace. If the name is used as the target of a function or class statement, this will be true. Note that a single name can be bound to multiple objects. If is_namespace() is true, the name may also be bound to other objects, like an int or list, that does not introduce a new namespace. """ return bool(self.__namespaces)
"""Returns true if name binding introduces new namespace. If the name is used as the target of a function or class statement, this will be true. Note that a single name can be bound to multiple objects. If is_namespace() is true, the name may also be bound to other objects, like an int or list, that does not introduce a new namespace. """
https://github.com/v2v3v4/BigWorld-Engine-14.4.1/blob/4389085c8ce35cff887a4cc18fc47d1133d89ffb/programming/bigworld/third_party/python/Lib/symtable.py#L207-L218
4389085c8ce35cff887a4cc18fc47d1133d89ffb
BigWorld-Engine-14.4.1
github_2023
v2v3v4
python
create_arc
def create_arc(self, *args, **kw): """Create arc shaped region with coordinates x1,y1,x2,y2.""" return self._create('arc', args, kw)
"""Create arc shaped region with coordinates x1,y1,x2,y2."""
https://github.com/v2v3v4/BigWorld-Engine-14.4.1/blob/4389085c8ce35cff887a4cc18fc47d1133d89ffb/programming/bigworld/third_party/python/Lib/lib-tk/Tkinter.py#L2271-L2273
4389085c8ce35cff887a4cc18fc47d1133d89ffb
BigWorld-Engine-14.4.1
github_2023
v2v3v4
python
clearstamp
def clearstamp(self, stampid): """Delete stamp with given stampid Argument: stampid - an integer, must be return value of previous stamp() call. Example (for a Turtle instance named turtle): >>> turtle.color("blue") >>> astamp = turtle.stamp() >>> turtle.fd(50) >>> turtle.clearstamp(astamp) """ self._clearstamp(stampid) self._update()
"""Delete stamp with given stampid Argument: stampid - an integer, must be return value of previous stamp() call. Example (for a Turtle instance named turtle): >>> turtle.color("blue") >>> astamp = turtle.stamp() >>> turtle.fd(50) >>> turtle.clearstamp(astamp) """
https://github.com/v2v3v4/BigWorld-Engine-14.4.1/blob/4389085c8ce35cff887a4cc18fc47d1133d89ffb/programming/bigworld/third_party/python/Lib/lib-tk/turtle.py#L2933-L2946
4389085c8ce35cff887a4cc18fc47d1133d89ffb
BigWorld-Engine-14.4.1
github_2023
v2v3v4
python
getEventCategory
def getEventCategory(self, record): """ Return the event category for the record. Override this if you want to specify your own categories. This version returns 0. """ return 0
""" Return the event category for the record. Override this if you want to specify your own categories. This version returns 0. """
https://github.com/v2v3v4/BigWorld-Engine-14.4.1/blob/4389085c8ce35cff887a4cc18fc47d1133d89ffb/programming/bigworld/third_party/python/Lib/logging/handlers.py#L994-L1001
4389085c8ce35cff887a4cc18fc47d1133d89ffb
BigWorld-Engine-14.4.1
github_2023
v2v3v4
python
errorCheck
def errorCheck(self): """Check for an error if necessary. This only generates code if the variable's mode is ErrorMode. """ if self.flags == ErrorMode: self.type.errorCheck(self.name)
"""Check for an error if necessary. This only generates code if the variable's mode is ErrorMode. """
https://github.com/v2v3v4/BigWorld-Engine-14.4.1/blob/4389085c8ce35cff887a4cc18fc47d1133d89ffb/programming/bigworld/third_party/python/Tools/bgen/bgen/bgenVariable.py#L91-L97
4389085c8ce35cff887a4cc18fc47d1133d89ffb
griptape
github_2023
griptape-ai
python
get_mime_type
def get_mime_type(file_path_or_bytes: str | bytes) -> str: """Attempt to determine the MIME type of a file or bytes. If the input is a file path, we use the built-in `mimetypes` package to guess the MIME type. If the input is bytes, we use the `filetype` library to determine the MIME type. If the library cannot determine the MIME type (data missing magic bytes), we use a few heuristics to guess the type. Args: file_path_or_bytes: The path to the file or the bytes to check. Returns: The MIME type of the file or bytes. """ if isinstance(file_path_or_bytes, bytes): filetype_guess = filetype.guess(file_path_or_bytes) if filetype_guess is None: if _is_text(file_path_or_bytes): if _is_json(file_path_or_bytes): return "application/json" elif _is_csv(file_path_or_bytes): return "text/csv" return "text/plain" else: return "application/octet-stream" else: return filetype_guess.mime else: type_, _ = mimetypes.guess_type(file_path_or_bytes) if type_ is None: return "application/octet-stream" else: return type_
"""Attempt to determine the MIME type of a file or bytes. If the input is a file path, we use the built-in `mimetypes` package to guess the MIME type. If the input is bytes, we use the `filetype` library to determine the MIME type. If the library cannot determine the MIME type (data missing magic bytes), we use a few heuristics to guess the type. Args: file_path_or_bytes: The path to the file or the bytes to check. Returns: The MIME type of the file or bytes. """
https://github.com/griptape-ai/griptape/blob/f9ac289715eeb24cb15cbb31e8e32c0e9fb00d45/griptape/utils/file_utils.py#L11-L43
f9ac289715eeb24cb15cbb31e8e32c0e9fb00d45
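Illustrative calls following the branches above; the exact results depend on the host's `mimetypes` database and on the `_is_text`/`_is_json`/`_is_csv` helpers, so these are expected outcomes rather than guarantees:

```python
get_mime_type("notes.txt")           # path -> mimetypes guess, typically "text/plain"
get_mime_type(b'{"key": "value"}')   # bytes without magic bytes, valid JSON -> "application/json"
get_mime_type(b"a,b\n1,2\n")         # text bytes that parse as CSV -> "text/csv"
get_mime_type(b"\x00\x01\x02\x03")   # non-text bytes with no known signature -> "application/octet-stream"
```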
f2
github_2023
Johnserf-Seed
python
fetch_play_list
async def fetch_play_list( self, secUid: str, cursor: int, page_counts: int, ) -> UserPlayListFilter: """ 用于获取指定用户的作品合集列表 (Used to get video mix list of specified user) Args: secUid: str: 用户ID (User ID) cursor: int: 分页游标 (Page cursor) page_counts: int: 分页数量 (Page counts) Return: playlist: UserPlayListFilter: 作品合集列表 (Video mix list) """ logger.debug(_("处理用户:{0} 的作品合集列表").format(secUid)) async with TiktokCrawler(self.kwargs) as crawler: params = UserPlayList(secUid=secUid, cursor=cursor, count=page_counts) response = await crawler.fetch_user_play_list(params) playlist = UserPlayListFilter(response) if not playlist.hasPlayList: logger.info(_("用户:{0} 没有作品合集").format(secUid)) return {} logger.debug(_("当前请求的cursor:{0}").format(cursor)) logger.debug( _("作品合集ID:{0} 作品合集标题:{1}").format( playlist.mixId, playlist.mixName ) ) return playlist
""" 用于获取指定用户的作品合集列表 (Used to get video mix list of specified user) Args: secUid: str: 用户ID (User ID) cursor: int: 分页游标 (Page cursor) page_counts: int: 分页数量 (Page counts) Return: playlist: UserPlayListFilter: 作品合集列表 (Video mix list) """
https://github.com/Johnserf-Seed/f2/blob/c80eeabf0622b34549e3316f4f711c3f01109bc1/f2/apps/tiktok/handler.py#L688-L724
c80eeabf0622b34549e3316f4f711c3f01109bc1
DSVT
github_2023
Haiyang-W
python
bilinear_interpolate_torch
def bilinear_interpolate_torch(im, x, y): """ Args: im: (H, W, C) [y, x] x: (N) y: (N) Returns: """ x0 = torch.floor(x).long() x1 = x0 + 1 y0 = torch.floor(y).long() y1 = y0 + 1 x0 = torch.clamp(x0, 0, im.shape[1] - 1) x1 = torch.clamp(x1, 0, im.shape[1] - 1) y0 = torch.clamp(y0, 0, im.shape[0] - 1) y1 = torch.clamp(y1, 0, im.shape[0] - 1) Ia = im[y0, x0] Ib = im[y1, x0] Ic = im[y0, x1] Id = im[y1, x1] wa = (x1.type_as(x) - x) * (y1.type_as(y) - y) wb = (x1.type_as(x) - x) * (y - y0.type_as(y)) wc = (x - x0.type_as(x)) * (y1.type_as(y) - y) wd = (x - x0.type_as(x)) * (y - y0.type_as(y)) ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd) return ans
""" Args: im: (H, W, C) [y, x] x: (N) y: (N) Returns: """
https://github.com/Haiyang-W/DSVT/blob/8cfc2a6f23eed0b10aabcdc4768c60b184357061/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py#L11-L42
8cfc2a6f23eed0b10aabcdc4768c60b184357061
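The function is plain bilinear interpolation over a feature map: with the query $(x, y)$ clamped into the image and its integer neighbours $x_0 \le x \le x_1$, $y_0 \le y \le y_1$,

$$ f(x, y) = I_{y_0 x_0}(x_1 - x)(y_1 - y) + I_{y_1 x_0}(x_1 - x)(y - y_0) + I_{y_0 x_1}(x - x_0)(y_1 - y) + I_{y_1 x_1}(x - x_0)(y - y_0), $$

so for N query points and an (H, W, C) map the returned `ans` has shape (N, C).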
OnePose_Plus_Plus
github_2023
zju3dv
python
merge_train_core
def merge_train_core( anno_2d_file, avg_anno_3d_file, idxs_file, img_id, ann_id, images, annotations, ): """ Merge training annotations of different objects""" with open(anno_2d_file, "r") as f: annos_2d = json.load(f) for anno_2d in annos_2d: img_id += 1 info = { "id": img_id, "img_file": anno_2d["img_file"], } images.append(info) ann_id += 1 anno = { "image_id": img_id, "id": ann_id, "pose_file": anno_2d["pose_file"], "anno2d_file": anno_2d["anno_file"], "avg_anno3d_file": avg_anno_3d_file, "idxs_file": idxs_file, } annotations.append(anno) return img_id, ann_id
""" Merge training annotations of different objects"""
https://github.com/zju3dv/OnePose_Plus_Plus/blob/fc660efb1f594468642d681e35e4843928f16f3e/merge.py#L13-L46
fc660efb1f594468642d681e35e4843928f16f3e
RDM-Region-Aware-Diffusion-Model
github_2023
haha-lisa
python
get_named_beta_schedule
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): """ Get a pre-defined beta schedule for the given name. The beta schedule library consists of beta schedules which remain similar in the limit of num_diffusion_timesteps. Beta schedules may be added, but should not be removed or changed once they are committed to maintain backwards compatibility. """ #if schedule_name == "linear": # Linear schedule from Ho et al, extended to work for any number of # diffusion steps. # scale = 1000 / num_diffusion_timesteps # beta_start = scale * 0.0001 # beta_end = scale * 0.02 # return np.linspace( # beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64 # ) if schedule_name == "linear": linear_start = 0.00085 linear_end = 0.012 return np.linspace(linear_start ** 0.5, linear_end ** 0.5, num_diffusion_timesteps, dtype=np.float64) ** 2 elif schedule_name == "cosine": return betas_for_alpha_bar( num_diffusion_timesteps, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, ) else: raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
""" Get a pre-defined beta schedule for the given name. The beta schedule library consists of beta schedules which remain similar in the limit of num_diffusion_timesteps. Beta schedules may be added, but should not be removed or changed once they are committed to maintain backwards compatibility. """
https://github.com/haha-lisa/RDM-Region-Aware-Diffusion-Model/blob/be0c767f05af74021530962218106c9c20cad3f0/guided_diffusion/gaussian_diffusion.py#L20-L49
be0c767f05af74021530962218106c9c20cad3f0
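In closed form, the two schedules returned above are a "scaled-linear" schedule that interpolates $\sqrt{\beta}$ linearly between fixed endpoints, and the cosine schedule of Nichol & Dhariwal (2021):

$$ \beta_t^{\text{linear}} = \Bigl(\sqrt{\beta_{\min}} + \tfrac{t}{T-1}\bigl(\sqrt{\beta_{\max}} - \sqrt{\beta_{\min}}\bigr)\Bigr)^{2}, \quad \beta_{\min} = 8.5\times 10^{-4},\ \beta_{\max} = 0.012, $$

$$ \bar{\alpha}(s) = \cos^{2}\!\Bigl(\frac{s + 0.008}{1.008}\cdot\frac{\pi}{2}\Bigr), \qquad \beta_t = 1 - \frac{\bar{\alpha}\bigl((t+1)/T\bigr)}{\bar{\alpha}\bigl(t/T\bigr)}, $$

with the cosine betas clipped at a maximum value inside `betas_for_alpha_bar`.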
CFScanner
github_2023
MortezaBashsiz
python
CloudflareSolver._parse_proxy
@staticmethod def _parse_proxy(proxy: str) -> Dict[str, str]: """ Parse a proxy URL string into a dictionary of proxy parameters for the Playwright browser. Parameters ---------- proxy : str Proxy URL string. Returns ------- Dict[str, str] Dictionary of proxy parameters. """ if "@" in proxy: proxy_regex = re.match("(.+)://(.+):(.+)@(.+)", proxy) server = f"{proxy_regex.group(1)}://{proxy_regex.group(4)}" proxy_params = { "server": server, "username": proxy_regex.group(2), "password": proxy_regex.group(3), } else: proxy_params = {"server": proxy} return proxy_params
""" Parse a proxy URL string into a dictionary of proxy parameters for the Playwright browser. Parameters ---------- proxy : str Proxy URL string. Returns ------- Dict[str, str] Dictionary of proxy parameters. """
https://github.com/MortezaBashsiz/CFScanner/blob/bcb96dd437e9cf90350ee5581df613b92380827f/other/gist/cfchallenger.py#L96-L123
bcb96dd437e9cf90350ee5581df613b92380827f
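Illustrative inputs and the dictionaries the regex `(.+)://(.+):(.+)@(.+)` would produce (the addresses and credentials here are made up):

```python
CloudflareSolver._parse_proxy("http://alice:s3cret@127.0.0.1:8080")
# -> {"server": "http://127.0.0.1:8080", "username": "alice", "password": "s3cret"}

CloudflareSolver._parse_proxy("socks5://10.0.0.1:1080")   # no credentials in the URL
# -> {"server": "socks5://10.0.0.1:1080"}
```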
temporian
github_2023
google
python
test_correct_or
def test_correct_or(self) -> None: """Test correct OR operator.""" expected = event_set( timestamps=[1, 2, 3, 4], features={"x": [True, False, True, True]}, same_sampling_as=self.evset_1, ) assertOperatorResult(self, self.evset_1 | self.evset_2, expected)
"""Test correct OR operator."""
https://github.com/google/temporian/blob/1e33b75b9fadfaf1c30bc20725cba9753105e4f9/temporian/core/operators/test/test_logical.py#L43-L50
1e33b75b9fadfaf1c30bc20725cba9753105e4f9
temporian
github_2023
google
python
PlotterBackend.finalize_subplot
@abstractmethod def finalize_subplot( self, ): """Finalizes a previously added sub plot.""" raise NotImplementedError
"""Finalizes a previously added sub plot."""
https://github.com/google/temporian/blob/1e33b75b9fadfaf1c30bc20725cba9753105e4f9/temporian/implementation/numpy/data/plotter_base.py#L77-L83
1e33b75b9fadfaf1c30bc20725cba9753105e4f9
cloudfoxable
github_2023
BishopFox
python
Pack
def Pack(self, msg, type_url_prefix='type.googleapis.com/', deterministic=None): """Packs the specified message into current Any message.""" if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/': self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) else: self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) self.value = msg.SerializeToString(deterministic=deterministic)
"""Packs the specified message into current Any message."""
https://github.com/BishopFox/cloudfoxable/blob/b7f028ebf2c9d9489e19736942b99fb07b0d0251/aws/challenges/Variable/data/lambda-src-backup/protobuf/internal/well_known_types.py#L64-L71
b7f028ebf2c9d9489e19736942b99fb07b0d0251
cloudfoxable
github_2023
BishopFox
python
_path_get
def _path_get(self, data, path): """Return the nested data at the given path. For instance: data = {'foo': ['bar', 'baz']} path = ['foo', 0] ==> 'bar' """ # jmespath isn't used here because it would be difficult to actually # create the jmespath query when taking all of the unknowns of key # structure into account. Gross though this is, it is simple and not # very error prone. d = data for step in path: d = d[step] return d
"""Return the nested data at the given path. For instance: data = {'foo': ['bar', 'baz']} path = ['foo', 0] ==> 'bar' """
https://github.com/BishopFox/cloudfoxable/blob/b7f028ebf2c9d9489e19736942b99fb07b0d0251/aws/challenges/Variable/data/lambda-src/botocore/paginate.py#L143-L158
b7f028ebf2c9d9489e19736942b99fb07b0d0251
cloudfoxable
github_2023
BishopFox
python
create_client
def create_client( self, service_name, region_name=None, api_version=None, use_ssl=True, verify=None, endpoint_url=None, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, config=None, ): """Create a botocore client. :type service_name: string :param service_name: The name of the service for which a client will be created. You can use the ``Session.get_available_services()`` method to get a list of all available service names. :type region_name: string :param region_name: The name of the region associated with the client. A client is associated with a single region. :type api_version: string :param api_version: The API version to use. By default, botocore will use the latest API version when creating a client. You only need to specify this parameter if you want to use a previous API version of the client. :type use_ssl: boolean :param use_ssl: Whether or not to use SSL. By default, SSL is used. Note that not all services support non-ssl connections. :type verify: boolean/string :param verify: Whether or not to verify SSL certificates. By default SSL certificates are verified. You can provide the following values: * False - do not validate SSL certificates. SSL will still be used (unless use_ssl is False), but SSL certificates will not be verified. * path/to/cert/bundle.pem - A filename of the CA cert bundle to uses. You can specify this argument if you want to use a different CA cert bundle than the one used by botocore. :type endpoint_url: string :param endpoint_url: The complete URL to use for the constructed client. Normally, botocore will automatically construct the appropriate URL to use when communicating with a service. You can specify a complete URL (including the "http/https" scheme) to override this behavior. If this value is provided, then ``use_ssl`` is ignored. :type aws_access_key_id: string :param aws_access_key_id: The access key to use when creating the client. This is entirely optional, and if not provided, the credentials configured for the session will automatically be used. You only need to provide this argument if you want to override the credentials used for this specific client. :type aws_secret_access_key: string :param aws_secret_access_key: The secret key to use when creating the client. Same semantics as aws_access_key_id above. :type aws_session_token: string :param aws_session_token: The session token to use when creating the client. Same semantics as aws_access_key_id above. :type config: botocore.client.Config :param config: Advanced client configuration options. If a value is specified in the client config, its value will take precedence over environment variables and configuration values, but not over a value passed explicitly to the method. If a default config object is set on the session, the config object used when creating the client will be the result of calling ``merge()`` on the default config with the config provided to this call. :rtype: botocore.client.BaseClient :return: A botocore client instance """ default_client_config = self.get_default_client_config() # If a config is provided and a default config is set, then # use the config resulting from merging the two. 
if config is not None and default_client_config is not None: config = default_client_config.merge(config) # If a config was not provided then use the default # client config from the session elif default_client_config is not None: config = default_client_config region_name = self._resolve_region_name(region_name, config) # Figure out the verify value base on the various # configuration options. if verify is None: verify = self.get_config_variable('ca_bundle') if api_version is None: api_version = self.get_config_variable('api_versions').get( service_name, None ) loader = self.get_component('data_loader') event_emitter = self.get_component('event_emitter') response_parser_factory = self.get_component('response_parser_factory') if config is not None and config.signature_version is UNSIGNED: credentials = None elif ( aws_access_key_id is not None and aws_secret_access_key is not None ): credentials = botocore.credentials.Credentials( access_key=aws_access_key_id, secret_key=aws_secret_access_key, token=aws_session_token, ) elif self._missing_cred_vars(aws_access_key_id, aws_secret_access_key): raise PartialCredentialsError( provider='explicit', cred_var=self._missing_cred_vars( aws_access_key_id, aws_secret_access_key ), ) else: credentials = self.get_credentials() auth_token = self.get_auth_token() endpoint_resolver = self._get_internal_component('endpoint_resolver') exceptions_factory = self._get_internal_component('exceptions_factory') config_store = self.get_component('config_store') defaults_mode = self._resolve_defaults_mode(config, config_store) if defaults_mode != 'legacy': smart_defaults_factory = self._get_internal_component( 'smart_defaults_factory' ) config_store = copy.deepcopy(config_store) smart_defaults_factory.merge_smart_defaults( config_store, defaults_mode, region_name ) client_creator = botocore.client.ClientCreator( loader, endpoint_resolver, self.user_agent(), event_emitter, retryhandler, translate, response_parser_factory, exceptions_factory, config_store, ) client = client_creator.create_client( service_name=service_name, region_name=region_name, is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify, credentials=credentials, scoped_config=self.get_scoped_config(), client_config=config, api_version=api_version, auth_token=auth_token, ) monitor = self._get_internal_component('monitor') if monitor is not None: monitor.register(client.meta.events) return client
"""Create a botocore client. :type service_name: string :param service_name: The name of the service for which a client will be created. You can use the ``Session.get_available_services()`` method to get a list of all available service names. :type region_name: string :param region_name: The name of the region associated with the client. A client is associated with a single region. :type api_version: string :param api_version: The API version to use. By default, botocore will use the latest API version when creating a client. You only need to specify this parameter if you want to use a previous API version of the client. :type use_ssl: boolean :param use_ssl: Whether or not to use SSL. By default, SSL is used. Note that not all services support non-ssl connections. :type verify: boolean/string :param verify: Whether or not to verify SSL certificates. By default SSL certificates are verified. You can provide the following values: * False - do not validate SSL certificates. SSL will still be used (unless use_ssl is False), but SSL certificates will not be verified. * path/to/cert/bundle.pem - A filename of the CA cert bundle to uses. You can specify this argument if you want to use a different CA cert bundle than the one used by botocore. :type endpoint_url: string :param endpoint_url: The complete URL to use for the constructed client. Normally, botocore will automatically construct the appropriate URL to use when communicating with a service. You can specify a complete URL (including the "http/https" scheme) to override this behavior. If this value is provided, then ``use_ssl`` is ignored. :type aws_access_key_id: string :param aws_access_key_id: The access key to use when creating the client. This is entirely optional, and if not provided, the credentials configured for the session will automatically be used. You only need to provide this argument if you want to override the credentials used for this specific client. :type aws_secret_access_key: string :param aws_secret_access_key: The secret key to use when creating the client. Same semantics as aws_access_key_id above. :type aws_session_token: string :param aws_session_token: The session token to use when creating the client. Same semantics as aws_access_key_id above. :type config: botocore.client.Config :param config: Advanced client configuration options. If a value is specified in the client config, its value will take precedence over environment variables and configuration values, but not over a value passed explicitly to the method. If a default config object is set on the session, the config object used when creating the client will be the result of calling ``merge()`` on the default config with the config provided to this call. :rtype: botocore.client.BaseClient :return: A botocore client instance """
https://github.com/BishopFox/cloudfoxable/blob/b7f028ebf2c9d9489e19736942b99fb07b0d0251/aws/challenges/Variable/data/lambda-src/botocore/session.py#L826-L991
b7f028ebf2c9d9489e19736942b99fb07b0d0251
Diffusion-SDF
github_2023
princeton-computational-imaging
python
__init__
def __init__(self, num_classes, in_channels=3, depth=5, start_filts=64, up_mode='transpose', same_channels=False, merge_mode='concat', **kwargs): """ Arguments: in_channels: int, number of channels in the input tensor. Default is 3 for RGB images. depth: int, number of MaxPools in the U-Net. start_filts: int, number of convolutional filters for the first conv. up_mode: string, type of upconvolution. Choices: 'transpose' for transpose convolution or 'upsample' for nearest neighbour upsampling. """ super(UNet, self).__init__() if up_mode in ('transpose', 'upsample'): self.up_mode = up_mode else: raise ValueError("\"{}\" is not a valid mode for " "upsampling. Only \"transpose\" and " "\"upsample\" are allowed.".format(up_mode)) if merge_mode in ('concat', 'add'): self.merge_mode = merge_mode else: raise ValueError("\"{}\" is not a valid mode for" "merging up and down paths. " "Only \"concat\" and " "\"add\" are allowed.".format(up_mode)) # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add' if self.up_mode == 'upsample' and self.merge_mode == 'add': raise ValueError("up_mode \"upsample\" is incompatible " "with merge_mode \"add\" at the moment " "because it doesn't make sense to use " "nearest neighbour to reduce " "depth channels (by half).") self.num_classes = num_classes self.in_channels = in_channels self.start_filts = start_filts self.depth = depth self.down_convs = [] self.up_convs = [] # create the encoder pathway and add to a list for i in range(depth): ins = self.in_channels if i == 0 else outs outs = self.start_filts*(2**i) if not same_channels else self.in_channels pooling = True if i < depth-1 else False #print("down ins, outs: ", ins, outs) # [latent dim, 32], [32, 64]...[128, 256] down_conv = DownConv(ins, outs, pooling=pooling) self.down_convs.append(down_conv) # create the decoder pathway and add to a list # - careful! decoding only requires depth-1 blocks for i in range(depth-1): ins = outs outs = ins // 2 if not same_channels else ins up_conv = UpConv(ins, outs, up_mode=up_mode, merge_mode=merge_mode) self.up_convs.append(up_conv) #print("up ins, outs: ", ins, outs)# [256, 128]...[64, 32]; final 32 to latent is done through self.conv_final # add the list of modules to current module self.down_convs = nn.ModuleList(self.down_convs) self.up_convs = nn.ModuleList(self.up_convs) self.conv_final = conv1x1(outs, self.num_classes) self.reset_params()
""" Arguments: in_channels: int, number of channels in the input tensor. Default is 3 for RGB images. depth: int, number of MaxPools in the U-Net. start_filts: int, number of convolutional filters for the first conv. up_mode: string, type of upconvolution. Choices: 'transpose' for transpose convolution or 'upsample' for nearest neighbour upsampling. """
https://github.com/princeton-computational-imaging/Diffusion-SDF/blob/7eb2d6786b8864dc45cd180be8ad5c5b2f8c1f8f/models/archs/encoders/conv_pointnet.py#L389-L462
7eb2d6786b8864dc45cd180be8ad5c5b2f8c1f8f
UniPC
github_2023
wl-zhao
python
marginal_lambda
def marginal_lambda(self, t): """ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. """ log_mean_coeff = self.marginal_log_mean_coeff(t) log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) return log_mean_coeff - log_std
""" Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. """
https://github.com/wl-zhao/UniPC/blob/cf9de85bf2ed68137e6fba5f165f451677b174c4/example/score_sde_pytorch/uni_pc.py#L128-L134
cf9de85bf2ed68137e6fba5f165f451677b174c4
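In the VP notation used by the surrounding noise-schedule class, $\sigma_t = \sqrt{1 - \alpha_t^2}$, so the value returned above is the half log-SNR

$$ \lambda_t = \log\alpha_t - \log\sigma_t = \log\alpha_t - \tfrac{1}{2}\log\bigl(1 - e^{2\log\alpha_t}\bigr), $$

which is exactly `log_mean_coeff - 0.5 * log(1 - exp(2 * log_mean_coeff))` as computed in the code.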
modulus
github_2023
NVIDIA
python
build_fno
def build_fno(self, num_fno_modes: List[int]) -> None: """construct FNO block. Parameters ---------- num_fno_modes : List[int] Number of Fourier modes kept in spectral convolutions """ # Build Neural Fourier Operators self.spconv_layers = nn.ModuleList() self.conv_layers = nn.ModuleList() for _ in range(self.num_fno_layers): self.spconv_layers.append( layers.SpectralConv3d( self.fno_width, self.fno_width, num_fno_modes[0], num_fno_modes[1], num_fno_modes[2], ) ) self.conv_layers.append(nn.Conv3d(self.fno_width, self.fno_width, 1))
"""construct FNO block. Parameters ---------- num_fno_modes : List[int] Number of Fourier modes kept in spectral convolutions """
https://github.com/NVIDIA/modulus/blob/e6d7b02fb19ab9cdb3138de228ca3d6f0c99e7d1/modulus/models/fno/fno.py#L463-L484
e6d7b02fb19ab9cdb3138de228ca3d6f0c99e7d1
modulus
github_2023
NVIDIA
python
is_ignored
def is_ignored(path, working_path, ignore_patterns): """ Check if the path needs to be ignored """ # Get the git root path to stop the search git_root_path = Path(__file__) / Path(working_path) git_root_path = git_root_path.resolve() for pattern in ignore_patterns: normalized_pattern = pattern.rstrip("/") # Filter paths that are outside git root relevant_children = [ part for part in [path] + list(path.parents) if git_root_path in part.parents or part == git_root_path ] # Check the directory itself and each parent directory for part in relevant_children: # Match directories (patterns ending with '/') if pattern.endswith("/") and part.is_dir(): if fnmatch.fnmatch(part.name, normalized_pattern): return True # Match files or directories without a trailing '/' if not pattern.endswith("/") and ( fnmatch.fnmatch(str(part), pattern) or fnmatch.fnmatch(part.name, normalized_pattern) ): return True return False
""" Check if the path needs to be ignored """
https://github.com/NVIDIA/modulus/blob/e6d7b02fb19ab9cdb3138de228ca3d6f0c99e7d1/test/ci_tests/header_check.py#L42-L74
e6d7b02fb19ab9cdb3138de228ca3d6f0c99e7d1
modulus
github_2023
NVIDIA
python
test_irfft_ort_op
@check_ort_version() @pytest.mark.parametrize("dft_dim", [-1, 1]) def test_irfft_ort_op( test_data: Tensor, norm: str, dft_dim: int, rtol: float = 1e-5, atol: float = 1e-5 ): """Test IRFFT onnx runtime operation is consistent with torch irfft""" x = test_data.transpose(-1, dft_dim) x = fft.rfft(x, dim=dft_dim, norm=norm) class CustomIrfft(nn.Module): def forward(self, y): return fft.irfft(y, dim=dft_dim, norm=norm) model = CustomIrfft() output = model(x) x0 = torch.view_as_real(x) onnx_model = export_to_onnx_stream(model, x0) output_ort = run_onnx_inference(onnx_model, (x0,)) assert len(output_ort) == 1 output_onnx = torch.Tensor(output_ort[0]) assert torch.allclose(output, output_onnx, rtol, atol)
"""Test IRFFT onnx runtime operation is consistent with torch irfft"""
https://github.com/NVIDIA/modulus/blob/e6d7b02fb19ab9cdb3138de228ca3d6f0c99e7d1/test/deploy/test_onnx_fft.py#L168-L190
e6d7b02fb19ab9cdb3138de228ca3d6f0c99e7d1
safety-gymnasium
github_2023
PKU-Alignment
python
build_sensor_observation_space
def build_sensor_observation_space(self) -> gymnasium.spaces.Dict: """Build observation space for all sensor types. Returns: gymnasium.spaces.Dict: The observation space generated by sensors bound with agent. """ obs_space_dict = {} for sensor in self.sensor_conf.sensors: # Explicitly listed sensors dim = self.sensor_info.sensor_dim[sensor] obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (dim,), dtype=np.float64) # Velocities don't have wraparound effects that rotational positions do # Wraparounds are not kind to neural networks # Whereas the angle 2*pi is very close to 0, this isn't true in the network # In theory the network could learn this, but in practice we simplify it # when the sensors_angle_components switch is enabled. for sensor in self.sensor_info.hinge_vel_names: obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float64) for sensor in self.sensor_info.ballangvel_names: obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float64) if self.sensor_info.freejoint_pos_name: sensor = self.sensor_info.freejoint_pos_name obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float64) obs_space_dict[sensor + '1'] = gymnasium.spaces.Box( -np.inf, np.inf, (1,), dtype=np.float64, ) if self.sensor_info.freejoint_qvel_name: sensor = self.sensor_info.freejoint_qvel_name obs_space_dict[sensor] = gymnasium.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float64) obs_space_dict[sensor + '1'] = gymnasium.spaces.Box( -np.inf, np.inf, (3,), dtype=np.float64, ) # Angular positions have wraparound effects, so output something more friendly if self.sensor_conf.sensors_angle_components: # Single joints are turned into sin(x), cos(x) pairs # These should be easier to learn for neural networks, # Since for angles, small perturbations in angle give small differences in sin/cos for sensor in self.sensor_info.hinge_pos_names: obs_space_dict[sensor] = gymnasium.spaces.Box( -np.inf, np.inf, (2,), dtype=np.float64, ) # Quaternions are turned into 3x3 rotation matrices # Quaternions have a wraparound issue in how they are normalized, # where the convention is to change the sign so the first element to be positive. # If the first element is close to 0, this can mean small differences in rotation # lead to large differences in value as the latter elements change sign. # This also means that the first element of the quaternion is not expectation zero. # The SO(3) rotation representation would be a good replacement here, # since it smoothly varies between values in all directions (the property we want), # but right now we have very little code to support SO(3) rotations. # Instead we use a 3x3 rotation matrix, which if normalized, smoothly varies as well. for sensor in self.sensor_info.ballquat_names: obs_space_dict[sensor] = gymnasium.spaces.Box( -np.inf, np.inf, (3, 3), dtype=np.float64, ) else: # Otherwise include the sensor without any processing for sensor in self.sensor_info.hinge_pos_names: obs_space_dict[sensor] = gymnasium.spaces.Box( -np.inf, np.inf, (1,), dtype=np.float64, ) for sensor in self.sensor_info.ballquat_names: obs_space_dict[sensor] = gymnasium.spaces.Box( -np.inf, np.inf, (4,), dtype=np.float64, ) return obs_space_dict
"""Build observation space for all sensor types. Returns: gymnasium.spaces.Dict: The observation space generated by sensors bound with agent. """
https://github.com/PKU-Alignment/safety-gymnasium/blob/3b117c1ee896b62cd47a527d201d1117fd36ef3d/safety_gymnasium/tasks/safe_multi_agent/bases/base_agent.py#L345-L429
3b117c1ee896b62cd47a527d201d1117fd36ef3d
DDPM-IP
github_2023
forever208
python
update_ema
def update_ema(target_params, source_params, rate=0.99): """ Update target parameters to be closer to those of source parameters using an exponential moving average. :param target_params: the target parameter sequence. :param source_params: the source parameter sequence. :param rate: the EMA rate (closer to 1 means slower). """ for targ, src in zip(target_params, source_params): targ.detach().mul_(rate).add_(src, alpha=1 - rate)
""" Update target parameters to be closer to those of source parameters using an exponential moving average. :param target_params: the target parameter sequence. :param source_params: the source parameter sequence. :param rate: the EMA rate (closer to 1 means slower). """
https://github.com/forever208/DDPM-IP/blob/1f767192e8b60e1694c670672d5adfd8b42256f5/guided_diffusion/nn.py#L55-L65
1f767192e8b60e1694c670672d5adfd8b42256f5
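This is the standard exponential-moving-average update,

$$ \theta_{\text{EMA}} \leftarrow \text{rate}\cdot\theta_{\text{EMA}} + (1 - \text{rate})\cdot\theta, $$

applied in place to each target parameter; it is typically called once per optimizer step with a rate close to 1 (e.g. 0.9999) so that the EMA copy trails the live weights slowly.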
ControlNet
github_2023
lllyasviel
python
forward
def forward(self, query_feats, key_feats): """Forward function.""" context = super(ObjectAttentionBlock, self).forward(query_feats, key_feats) output = self.bottleneck(torch.cat([context, query_feats], dim=1)) if self.query_downsample is not None: output = resize(query_feats) return output
"""Forward function."""
https://github.com/lllyasviel/ControlNet/blob/ed85cd1e25a5ed592f7d8178495b4483de0331bf/annotator/uniformer/mmseg/models/decode_heads/ocr_head.py#L73-L81
ed85cd1e25a5ed592f7d8178495b4483de0331bf
llama-hub
github_2023
run-llama
python
load_data
def load_data( self, state: Optional[IssueState] = IssueState.OPEN, labelFilters: Optional[List[Tuple[str, FilterType]]] = None, ) -> List[Document]: """ Load issues from a repository and converts them to documents. Each issue is converted to a document by doing the following: - The text of the document is the concatenation of the title and the body of the issue. - The title of the document is the title of the issue. - The doc_id of the document is the issue number. - The extra_info of the document is a dictionary with the following keys: - state: State of the issue. - created_at: Date when the issue was created. - closed_at: Date when the issue was closed. Only present if the issue is closed. - url: URL of the issue. - assignee: Login of the user assigned to the issue. Only present if the issue is assigned. - The embedding of the document is None. - The doc_hash of the document is None. Args: - state (IssueState): State of the issues to retrieve. Default is IssueState.OPEN. - labelFilters: an optional list of filters to apply to the issue list based on labels. :return: list of documents """ documents = [] page = 1 # Loop until there are no more issues while True: issues: Dict = self._loop.run_until_complete( self._github_client.get_issues( self._owner, self._repo, state=state.value, page=page ) ) if len(issues) == 0: print_if_verbose(self._verbose, "No more issues found, stopping") break print_if_verbose( self._verbose, f"Found {len(issues)} issues in the repo page {page}" ) page += 1 filterCount = 0 for issue in issues: if not self._must_include(labelFilters, issue): filterCount += 1 continue title = issue["title"] body = issue["body"] document = Document( doc_id=str(issue["number"]), text=f"{title}\n{body}", ) extra_info = { "state": issue["state"], "created_at": issue["created_at"], # url is the API URL "url": issue["url"], # source is the HTML URL, more conveninent for humans "source": issue["html_url"], } if issue["closed_at"] is not None: extra_info["closed_at"] = issue["closed_at"] if issue["assignee"] is not None: extra_info["assignee"] = issue["assignee"]["login"] if issue["labels"] is not None: extra_info["labels"] = [label["name"] for label in issue["labels"]] document.extra_info = extra_info documents.append(document) print_if_verbose(self._verbose, f"Resulted in {len(documents)} documents") if labelFilters is not None: print_if_verbose(self._verbose, f"Filtered out {filterCount} issues") return documents
""" Load issues from a repository and converts them to documents. Each issue is converted to a document by doing the following: - The text of the document is the concatenation of the title and the body of the issue. - The title of the document is the title of the issue. - The doc_id of the document is the issue number. - The extra_info of the document is a dictionary with the following keys: - state: State of the issue. - created_at: Date when the issue was created. - closed_at: Date when the issue was closed. Only present if the issue is closed. - url: URL of the issue. - assignee: Login of the user assigned to the issue. Only present if the issue is assigned. - The embedding of the document is None. - The doc_hash of the document is None. Args: - state (IssueState): State of the issues to retrieve. Default is IssueState.OPEN. - labelFilters: an optional list of filters to apply to the issue list based on labels. :return: list of documents """
https://github.com/run-llama/llama-hub/blob/b476d3bd2c963cad9dfe2944de7d6ce408aac65a/llama_hub/github_repo_issues/base.py#L118-L196
b476d3bd2c963cad9dfe2944de7d6ce408aac65a
llama-hub
github_2023
run-llama
python
read_page
def read_page(self, page_id: str) -> str: """Read a page.""" return self._read_block(page_id)
"""Read a page."""
https://github.com/run-llama/llama-hub/blob/b476d3bd2c963cad9dfe2944de7d6ce408aac65a/llama_hub/notion/base.py#L89-L91
b476d3bd2c963cad9dfe2944de7d6ce408aac65a
llama-hub
github_2023
run-llama
python
is_risk_title
def is_risk_title(title: str, filing_type: Optional[str]) -> bool: """Checks to see if the title matches the pattern for the risk heading.""" if filing_type in REPORT_TYPES: return is_10k_risk_title(clean_sec_text(title, lowercase=True)) elif filing_type in S1_TYPES: return is_s1_risk_title(clean_sec_text(title, lowercase=True)) return False
"""Checks to see if the title matches the pattern for the risk heading."""
https://github.com/run-llama/llama-hub/blob/b476d3bd2c963cad9dfe2944de7d6ce408aac65a/llama_hub/sec_filings/prepline_sec_filings/sec_document.py#L359-L365
b476d3bd2c963cad9dfe2944de7d6ce408aac65a
MS-AMP
github_2023
Azure
python
_configure_optimizer
def _configure_optimizer(self, client_optimizer, model_parameters): """Config basic optimizer and optimizer. Args: client_optimizer (torch.optim.Optimizer or callable): client optimizer. model_parameters (list): list of model parameters. """ if client_optimizer is not None: if isinstance(client_optimizer, tuple(self._supported_optims())): client_optimizer.param_groups[:] = [ pg for pg in client_optimizer.param_groups if len(pg['params']) != 0 ] log_dist("Removing param_group that has no 'params' in the client Optimizer", ranks=[0]) basic_optimizer = client_optimizer log_dist('Using client Optimizer as basic optimizer', ranks=[0]) else: basic_optimizer = client_optimizer(model_parameters) log_dist('Using client callable to create basic optimizer', ranks=[0]) else: basic_optimizer = self._configure_basic_optimizer(model_parameters) log_dist(f'Using DeepSpeed Optimizer param name {self.optimizer_name()} as basic optimizer', ranks=[0]) if self.msamp_enabled(): optlevel = self.msamp_optlevel() if optlevel == 'O3': # O3 is for ZeRO and need to cast to O2 for MS-AMP. optlevel = 'O2' use_te = self.msamp_use_te() model, basic_optimizer = msamp_initialize(self.module, basic_optimizer, optlevel, use_te) self._set_client_model(model) # We need to reset param names after msamp initialize. self.param_names = {param: name for name, param in model.named_parameters()} self._check_for_duplicates(basic_optimizer) self.basic_optimizer = basic_optimizer log_dist('DeepSpeed Basic Optimizer = {}'.format(basic_optimizer.__class__.__name__), ranks=[0]) optimizer_wrapper = self._do_optimizer_sanity_check(basic_optimizer) if optimizer_wrapper == ZERO_OPTIMIZATION: self.optimizer = self._configure_zero_optimizer(basic_optimizer) elif optimizer_wrapper == FP8: self.optimizer = self._configure_fp8_optimizer(basic_optimizer, optimizer_wrapper) elif optimizer_wrapper == AMP: amp_params = self.amp_params() log_dist(f'Initializing AMP with these params: {amp_params}', ranks=[0]) model, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params) self._set_client_model(model) self._broadcast_model() # TODO: maybe need to broadcast experts differently? elif optimizer_wrapper == FP16: self.optimizer = self._configure_fp16_optimizer(basic_optimizer) elif optimizer_wrapper == BFLOAT16: self.optimizer = self._configure_bf16_optimizer(basic_optimizer) else: self.optimizer = basic_optimizer log_dist('DeepSpeed Final Optimizer = {}'.format(self.optimizer_name()), ranks=[0]) self.compression_scheduler = self._configure_compression_scheduler() self.quantizer = self._configure_quantization()
"""Config basic optimizer and optimizer. Args: client_optimizer (torch.optim.Optimizer or callable): client optimizer. model_parameters (list): list of model parameters. """
https://github.com/Azure/MS-AMP/blob/8de09e00c45ce1ce44dcd0ccf1d4e6e4f4ca04fa/msamp/deepspeed/runtime/engine.py#L63-L124
8de09e00c45ce1ce44dcd0ccf1d4e6e4f4ca04fa
MS-AMP
github_2023
Azure
python
__getattr__
def __getattr__(self, name): """Get attribute by name. Args: name (str): Attribute name. Returns: Attribute value. """ return self.__dict__.get(name, getattr(self.__dict__['ctx'], name))
"""Get attribute by name. Args: name (str): Attribute name. Returns: Attribute value. """
https://github.com/Azure/MS-AMP/blob/8de09e00c45ce1ce44dcd0ccf1d4e6e4f4ca04fa/msamp/te/modules.py#L156-L165
8de09e00c45ce1ce44dcd0ccf1d4e6e4f4ca04fa
optimum-neuron
github_2023
huggingface
python
load_safetensors
def load_safetensors(self, state_dict_dir): """ Lazily load the safetensors by associating each weight with the filename. """ filename = os.path.join(state_dict_dir, _SAFETENSORS_MODEL_FILENAME) with safe_open(filename, framework="pt") as f: keys = f.keys() key_to_filename = dict(zip(keys, [_SAFETENSORS_MODEL_FILENAME] * len(keys))) self._load_from_state_dict_dir(state_dict_dir, key_to_filename)
""" Lazily load the safetensors by associating each weight with the filename. """
https://github.com/huggingface/optimum-neuron/blob/558379f8a1f7f67820ea219323f9af434a6ae2ba/optimum/neuron/backends/hlo/module.py#L174-L182
558379f8a1f7f67820ea219323f9af434a6ae2ba
manta
github_2023
fischermoseley
python
export_csv
def export_csv(self, path): """ Export the capture to a CSV file. Args: path (str): Path to the destination file. Returns: None """ names = [p.name for p in self._probes] values = [self.get_trace(n) for n in names] # Transpose list of lists so that data flows top-to-bottom instead of # left-to-right values_transpose = [list(x) for x in zip(*values)] import csv with open(path, "w") as f: writer = csv.writer(f) writer.writerow(names) writer.writerows(values_transpose)
""" Export the capture to a CSV file. Args: path (str): Path to the destination file. Returns: None """
https://github.com/fischermoseley/manta/blob/e11d9a8315a7a1c91ab5087955d575fc149da06c/src/manta/logic_analyzer/capture.py#L69-L93
e11d9a8315a7a1c91ab5087955d575fc149da06c
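The transpose step in export_csv above is just zip(*values); a self-contained illustration with made-up probe traces:

# Two probes with three samples each become three CSV rows of two columns.
values = [[1, 2, 3], [10, 20, 30]]
values_transpose = [list(x) for x in zip(*values)]
print(values_transpose)  # [[1, 10], [2, 20], [3, 30]]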
vllm
github_2023
vllm-project
python
test_k_equals_zero
@pytest.mark.parametrize('k', [0]) @pytest.mark.parametrize('batch_size', [1, 2, 32]) @pytest.mark.parametrize("acceptance_sampler_method", ["rejection_sampler", "typical_acceptance_sampler"]) @torch.inference_mode() def test_k_equals_zero(k: int, batch_size: int, acceptance_sampler_method: str): """Verify that the SpecDecodeWorker calls the draft and target workers when k is zero. This happens during prefill. """ draft_worker = mock_worker(cls=MultiStepWorker) target_worker = mock_worker() metrics_collector = MagicMock(spec=AsyncMetricsCollector) sampler_output = MagicMock(spec=SamplerOutput) sampler_output.hidden_states = None target_worker.execute_model.return_value = [sampler_output] draft_worker.device = 'cuda' target_worker.device = 'cuda' set_random_seed(1) worker = SpecDecodeWorker( proposer_worker=draft_worker, scorer_worker=target_worker, spec_decode_sampler=mock_spec_decode_sampler( acceptance_sampler_method), disable_logprobs=False, metrics_collector=metrics_collector, ) seq_group_metadata_list, _, _ = create_batch(batch_size, k, prev_output_token_len=0) execute_model_req = ExecuteModelRequest( seq_group_metadata_list=seq_group_metadata_list, num_lookahead_slots=k) out = worker.execute_model(execute_model_req=execute_model_req) assert len(out) == 1, f"expected only one token output when {k=}" assert out[0].sampled_token_probs is None, ( "expect gpu tensor references to be None") assert out[ 0].sampled_token_ids is None, "expect gpu tensor references to be None" draft_worker.execute_model.assert_called_once_with(execute_model_req) target_worker.execute_model.assert_called_once_with(execute_model_req)
"""Verify that the SpecDecodeWorker calls the draft and target workers when k is zero. This happens during prefill. """
https://github.com/vllm-project/vllm/blob/c9e2d644e728e8e93ef2871276ed7a6b39c1d0eb/tests/spec_decode/test_spec_decode_worker.py#L474-L521
c9e2d644e728e8e93ef2871276ed7a6b39c1d0eb
python_motion_planning
github_2023
ai-winter
python
mod2pi
def mod2pi(self, theta: float) -> float: """ Perform modulus operation on 2π. """ return theta - 2.0 * math.pi * math.floor(theta / math.pi / 2.0)
""" Perform modulus operation on 2π. """
https://github.com/ai-winter/python_motion_planning/blob/dc5f45c42488383a488ace21495e70e8129a2b55/python_motion_planning/curve_generation/curve.py#L51-L55
dc5f45c42488383a488ace21495e70e8129a2b55
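A quick numeric check of mod2pi above, rewritten as a free function so the sketch runs on its own; the formula wraps any angle into [0, 2π).

import math

def mod2pi(theta: float) -> float:
    # Same formula as the recorded method.
    return theta - 2.0 * math.pi * math.floor(theta / math.pi / 2.0)

assert abs(mod2pi(3.0 * math.pi) - math.pi) < 1e-12        # 3π wraps to π
assert abs(mod2pi(-0.5) - (2.0 * math.pi - 0.5)) < 1e-12   # negative angles wrap into [0, 2π)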
devine
github_2023
devine-dl
python
clear
@env.group(name="clear", short_help="Clear an environment directory.", context_settings=context_settings) def clear() -> None: """Clear an environment directory."""
"""Clear an environment directory."""
https://github.com/devine-dl/devine/blob/09eda168824157851e30003b196f4851298ec3ac/devine/commands/env.py#L66-L68
09eda168824157851e30003b196f4851298ec3ac
REAL-Video-Enhancer
github_2023
TNTwise
python
checkForNCNN
def checkForNCNN() -> bool: """ function that checks if the pytorch backend is available """ try: from rife_ncnn_vulkan_python import Rife import ncnn try: from upscale_ncnn_py import UPSCALE except Exception: printAndLog( "Warning: Cannot import upscale_ncnn, falling back to default ncnn processing. (Please install vcredlist on your computer to fix this!)" ) return True except ImportError as e: log(str(e)) return False except Exception as e: log(str(e))
""" function that checks if the pytorch backend is available """
https://github.com/TNTwise/REAL-Video-Enhancer/blob/46bda804944d050db1413ea2493b15a295e13441/backend/src/utils/Util.py#L270-L289
46bda804944d050db1413ea2493b15a295e13441
se3_diffusion
github_2023
jasonkyuyim
python
_to_a3m
def _to_a3m(sequences: Sequence[str]) -> str: """Converts sequences to an a3m file.""" names = ["sequence %d" % i for i in range(1, len(sequences) + 1)] a3m = [] for sequence, name in zip(sequences, names): a3m.append(u">" + name + u"\n") a3m.append(sequence + u"\n") return "".join(a3m)
"""Converts sequences to an a3m file."""
https://github.com/jasonkyuyim/se3_diffusion/blob/53359d71cfabc819ffaa571abd2cef736c871a5d/openfold/data/tools/kalign.py#L26-L33
53359d71cfabc819ffaa571abd2cef736c871a5d
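What _to_a3m above produces, shown on two made-up toy sequences; the helper is repeated inline so the sketch runs standalone.

from typing import Sequence

def _to_a3m(sequences: Sequence[str]) -> str:
    # Same logic as the recorded helper.
    names = ["sequence %d" % i for i in range(1, len(sequences) + 1)]
    a3m = []
    for sequence, name in zip(sequences, names):
        a3m.append(">" + name + "\n")
        a3m.append(sequence + "\n")
    return "".join(a3m)

print(_to_a3m(["ACGU", "ACGA"]))
# >sequence 1
# ACGU
# >sequence 2
# ACGA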
home-assistant-petkit
github_2023
RobertD502
python
async_turn_on
async def async_turn_on(self, **kwargs) -> None: """Turn light on.""" if not self.coordinator.client.use_ble_relay: raise HomeAssistantError(f'A PetKit BLE relay is required to control {self.wf_data.data["name"]}') if not self.wf_data.group_relay: raise HomeAssistantError( f'A PetKit BLE relay is required to control {self.wf_data.data["name"]}. ' f'PetKit did not return a valid relay device. If you do have a relay device, ' f'it may temporarily be offline.' ) try: await self.coordinator.client.control_water_fountain(self.wf_data, W5Command.LIGHT_ON) except BluetoothError: raise PetKitBluetoothError(f'Bluetooth connection to {self.wf_data.data["name"]} failed. Please try turning on the light again.') else: self.wf_data.data['settings']['lampRingSwitch'] = 1 self.async_write_ha_state() await asyncio.sleep(1) await self.coordinator.async_request_refresh()
"""Turn light on."""
https://github.com/RobertD502/home-assistant-petkit/blob/a7e5d617678bca56466e3571f72f60dce4aa98bd/custom_components/petkit/switch.py#L171-L191
a7e5d617678bca56466e3571f72f60dce4aa98bd
home-assistant-petkit
github_2023
RobertD502
python
ManualFeed.native_value
@property def native_value(self) -> str: """Always reset to 0,0""" return "0,0"
"""Always reset to 0,0"""
https://github.com/RobertD502/home-assistant-petkit/blob/a7e5d617678bca56466e3571f72f60dce4aa98bd/custom_components/petkit/text.py#L114-L118
a7e5d617678bca56466e3571f72f60dce4aa98bd
Online-HD-Map-Construction-CVPR2023
github_2023
Tsinghua-MARS-Lab
python
_init_branch
def _init_branch(self,): """Initialize classification branch and regression branch of head.""" fc_cls = Linear(self.embed_dims*self.bbox_size, self.cls_out_channels) # fc_cls = Linear(self.embed_dims, self.cls_out_channels) reg_branch = [] for _ in range(self.num_reg_fcs): reg_branch.append(Linear(self.embed_dims, self.embed_dims)) reg_branch.append(nn.LayerNorm(self.embed_dims)) reg_branch.append(nn.ReLU()) if self.discrete_output: reg_branch.append(nn.Linear( self.embed_dims, max(self.canvas_size), bias=True,)) else: reg_branch.append(nn.Linear( self.embed_dims, self.coord_dim, bias=True,)) reg_branch = nn.Sequential(*reg_branch) # add sigmoid or not def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) num_pred = self.transformer.decoder.num_layers if self.iterative: fc_cls = _get_clones(fc_cls, num_pred) reg_branch = _get_clones(reg_branch, num_pred) else: reg_branch = nn.ModuleList( [reg_branch for _ in range(num_pred)]) fc_cls = nn.ModuleList( [fc_cls for _ in range(num_pred)]) self.pre_branches = nn.ModuleDict([ ('cls', fc_cls), ('reg', reg_branch), ])
"""Initialize classification branch and regression branch of head."""
https://github.com/Tsinghua-MARS-Lab/Online-HD-Map-Construction-CVPR2023/blob/775b203aeab56be4248fb5495be452fa404bd29d/src/models/heads/map_element_detector.py#L124-L162
775b203aeab56be4248fb5495be452fa404bd29d
YOWOv2
github_2023
yjh0410
python
_remove_invalid_boxes
def _remove_invalid_boxes( self, detected_boxes, detected_scores, detected_class_labels, detected_masks=None, ): """Removes entries with invalid boxes. A box is invalid if either its xmax is smaller than its xmin, or its ymax is smaller than its ymin. Args: detected_boxes: A float numpy array of size [num_boxes, 4] containing box coordinates in [ymin, xmin, ymax, xmax] format. detected_scores: A float numpy array of size [num_boxes]. detected_class_labels: A int32 numpy array of size [num_boxes]. detected_masks: A uint8 numpy array of size [num_boxes, height, width]. Returns: valid_detected_boxes: A float numpy array of size [num_valid_boxes, 4] containing box coordinates in [ymin, xmin, ymax, xmax] format. valid_detected_scores: A float numpy array of size [num_valid_boxes]. valid_detected_class_labels: A int32 numpy array of size [num_valid_boxes]. valid_detected_masks: A uint8 numpy array of size [num_valid_boxes, height, width]. """ valid_indices = np.logical_and( detected_boxes[:, 0] < detected_boxes[:, 2], detected_boxes[:, 1] < detected_boxes[:, 3], ) detected_boxes = detected_boxes[valid_indices] detected_scores = detected_scores[valid_indices] detected_class_labels = detected_class_labels[valid_indices] if detected_masks is not None: detected_masks = detected_masks[valid_indices] return [ detected_boxes, detected_scores, detected_class_labels, detected_masks, ]
"""Removes entries with invalid boxes. A box is invalid if either its xmax is smaller than its xmin, or its ymax is smaller than its ymin. Args: detected_boxes: A float numpy array of size [num_boxes, 4] containing box coordinates in [ymin, xmin, ymax, xmax] format. detected_scores: A float numpy array of size [num_boxes]. detected_class_labels: A int32 numpy array of size [num_boxes]. detected_masks: A uint8 numpy array of size [num_boxes, height, width]. Returns: valid_detected_boxes: A float numpy array of size [num_valid_boxes, 4] containing box coordinates in [ymin, xmin, ymax, xmax] format. valid_detected_scores: A float numpy array of size [num_valid_boxes]. valid_detected_class_labels: A int32 numpy array of size [num_valid_boxes]. valid_detected_masks: A uint8 numpy array of size [num_valid_boxes, height, width]. """
https://github.com/yjh0410/YOWOv2/blob/9e8d23c11ad26ef5e6cfc01e3f4f1112dee634bf/evaluator/ava_evaluation/per_image_evaluation.py#L411-L453
9e8d23c11ad26ef5e6cfc01e3f4f1112dee634bf
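The validity test at the core of _remove_invalid_boxes above, as a self-contained NumPy snippet with made-up boxes and scores:

import numpy as np

# Boxes in [ymin, xmin, ymax, xmax]; the second box is degenerate (ymax < ymin) and is dropped.
boxes = np.array([[0.0, 0.0, 1.0, 1.0],
                  [0.5, 0.5, 0.2, 0.9]])
scores = np.array([0.9, 0.8])
valid = np.logical_and(boxes[:, 0] < boxes[:, 2], boxes[:, 1] < boxes[:, 3])
print(boxes[valid])   # only the first box survives
print(scores[valid])  # [0.9]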
grove
github_2023
hashicorp-forge
python
finalize
def finalize(self): """Performs a final set of operations after logs have been saved.""" return
"""Performs a final set of operations after logs have been saved."""
https://github.com/hashicorp-forge/grove/blob/a2b4bea0e15ef1a2a80bf092f4a7cb835901ecbd/grove/processors/__init__.py#L54-L57
a2b4bea0e15ef1a2a80bf092f4a7cb835901ecbd
CEDNet
github_2023
zhanggang001
python
center_of_mass
def center_of_mass(mask, esp=1e-6): """Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """ h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w
"""Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """
https://github.com/zhanggang001/CEDNet/blob/305d6371db427bcf3c93b5f85a2609d05e9eb4f1/ced-mmdet/mmdet/core/utils/misc.py#L168-L187
305d6371db427bcf3c93b5f85a2609d05e9eb4f1
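A tiny sanity check for center_of_mass above, copied inline so it runs on its own: a mask with a single foreground pixel has its centroid exactly on that pixel.

import torch

def center_of_mass(mask, esp=1e-6):
    # Same logic as the recorded helper.
    h, w = mask.shape
    grid_h = torch.arange(h, device=mask.device)[:, None]
    grid_w = torch.arange(w, device=mask.device)
    normalizer = mask.sum().float().clamp(min=esp)
    center_h = (mask * grid_h).sum() / normalizer
    center_w = (mask * grid_w).sum() / normalizer
    return center_h, center_w

mask = torch.zeros(5, 5)
mask[2, 3] = 1.0
center_h, center_w = center_of_mass(mask)
print(center_h.item(), center_w.item())  # 2.0 3.0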
CEDNet
github_2023
zhanggang001
python
_init_predictor
def _init_predictor(self): """Initialize predictor layers of the head.""" self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
"""Initialize predictor layers of the head."""
https://github.com/zhanggang001/CEDNet/blob/305d6371db427bcf3c93b5f85a2609d05e9eb4f1/ced-mmdet/mmdet/models/dense_heads/anchor_free_head.py#L153-L157
305d6371db427bcf3c93b5f85a2609d05e9eb4f1
not1mm
github_2023
mbridak
python
cty_lookup
def cty_lookup(self, callsign: str) -> list: """Lookup callsign in cty.dat file. Parameters ---------- callsign : str callsign to lookup Returns ------- return : list list of dicts containing the callsign and the station. """ callsign = callsign.upper() for count in reversed(range(len(callsign))): searchitem = callsign[: count + 1] result = { key: val for key, val in self.ctyfile.items() if key == searchitem } if not result: continue if result.get(searchitem).get("exact_match"): if searchitem == callsign: return result continue return result
"""Lookup callsign in cty.dat file. Parameters ---------- callsign : str callsign to lookup Returns ------- return : list list of dicts containing the callsign and the station. """
https://github.com/mbridak/not1mm/blob/c7dbdaf4e509a865a1bf64bc037e0efac6b4e200/not1mm/__main__.py#L1972-L1997
c7dbdaf4e509a865a1bf64bc037e0efac6b4e200
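A self-contained sketch of the longest-prefix search that cty_lookup above performs, using a made-up two-entry table instead of a parsed cty.dat (the exact_match handling of the real method is omitted):

ctyfile = {"K": {"entity": "United States"}, "KP4": {"entity": "Puerto Rico"}}

def longest_prefix(callsign: str, table: dict) -> dict:
    callsign = callsign.upper()
    # Try the longest prefix first, then progressively shorter ones.
    for count in reversed(range(len(callsign))):
        prefix = callsign[: count + 1]
        if prefix in table:
            return {prefix: table[prefix]}
    return {}

print(longest_prefix("kp4abc", ctyfile))  # {'KP4': {'entity': 'Puerto Rico'}}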
not1mm
github_2023
mbridak
python
change_mode
def change_mode(self, mode: str, intended_freq=None) -> None: """ Change mode to given mode. Send the new mode to the rig control. Set the band indicator. Set the window title. Clear the inputs. Read the CW macros. Parameters ---------- mode : str Mode to change to. Returns ------- None """ if mode in ("CW", "CW-U", "CW-L", "CWR"): if self.rig_control and self.rig_control.online: self.rig_control.set_mode(self.rig_control.last_cw_mode) if self.pref.get("cwtype") == 3 and self.rig_control is not None: if self.rig_control.interface == "flrig": self.cwspeed_spinbox_changed() self.rig_control.cat.set_flrig_cw_send(True) else: self.setmode("CW") self.radio_state["mode"] = "CW" band = getband(str(self.radio_state.get("vfoa", "0.0"))) self.set_band_indicator(band) self.set_window_title() self.clearinputs() self.read_macros() return if mode in ( "DIGI-U", "DIGI-L", "RTTY", "RTTY-R", "LSB-D", "USB-D", "AM-D", "FM-D", ): if self.rig_control and self.rig_control.online: self.rig_control.set_mode(self.rig_control.last_data_mode) else: self.radio_state["mode"] = "RTTY" self.setmode("RTTY") band = getband(str(self.radio_state.get("vfoa", "0.0"))) self.set_band_indicator(band) self.set_window_title() self.clearinputs() self.read_macros() return if mode == "SSB": if intended_freq: freq = intended_freq else: freq = int(self.radio_state.get("vfoa", 0)) if freq > 10000000: self.radio_state["mode"] = "USB" else: self.radio_state["mode"] = "LSB" if self.rig_control and self.rig_control.online: self.rig_control.set_mode(self.radio_state.get("mode")) else: self.setmode("SSB") band = getband(str(self.radio_state.get("vfoa", "0.0"))) self.set_band_indicator(band) self.set_window_title() self.clearinputs() self.read_macros()
""" Change mode to given mode. Send the new mode to the rig control. Set the band indicator. Set the window title. Clear the inputs. Read the CW macros. Parameters ---------- mode : str Mode to change to. Returns ------- None """
https://github.com/mbridak/not1mm/blob/c7dbdaf4e509a865a1bf64bc037e0efac6b4e200/not1mm/__main__.py#L3433-L3507
c7dbdaf4e509a865a1bf64bc037e0efac6b4e200
sd-webui-controlnet
github_2023
Mikubill
python
preprocess
def preprocess( img: np.ndarray, out_bbox, input_size: Tuple[int, int] = (192, 256) ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """Do preprocessing for DWPose model inference. Args: img (np.ndarray): Input image in shape. input_size (tuple): Input image size in shape (w, h). Returns: tuple: - resized_img (np.ndarray): Preprocessed image. - center (np.ndarray): Center of image. - scale (np.ndarray): Scale of image. """ # get shape of image img_shape = img.shape[:2] out_img, out_center, out_scale = [], [], [] if len(out_bbox) == 0: out_bbox = [[0, 0, img_shape[1], img_shape[0]]] for i in range(len(out_bbox)): x0 = out_bbox[i][0] y0 = out_bbox[i][1] x1 = out_bbox[i][2] y1 = out_bbox[i][3] bbox = np.array([x0, y0, x1, y1]) # get center and scale center, scale = bbox_xyxy2cs(bbox, padding=1.25) # do affine transformation resized_img, scale = top_down_affine(input_size, scale, center, img) # normalize image mean = np.array([123.675, 116.28, 103.53]) std = np.array([58.395, 57.12, 57.375]) resized_img = (resized_img - mean) / std out_img.append(resized_img) out_center.append(center) out_scale.append(scale) return out_img, out_center, out_scale
"""Do preprocessing for DWPose model inference. Args: img (np.ndarray): Input image in shape. input_size (tuple): Input image size in shape (w, h). Returns: tuple: - resized_img (np.ndarray): Preprocessed image. - center (np.ndarray): Center of image. - scale (np.ndarray): Scale of image. """
https://github.com/Mikubill/sd-webui-controlnet/blob/56cec5b2958edf3b1807b7e7b2b1b5186dbd2f81/annotator/openpose/cv_ox_pose.py#L6-L48
56cec5b2958edf3b1807b7e7b2b1b5186dbd2f81
sd-webui-controlnet
github_2023
Mikubill
python
__init__
def __init__(self): """ Init method. """ super().__init__()
""" Init method. """
https://github.com/Mikubill/sd-webui-controlnet/blob/56cec5b2958edf3b1807b7e7b2b1b5186dbd2f81/annotator/teed/Xsmish.py#L33-L37
56cec5b2958edf3b1807b7e7b2b1b5186dbd2f81
Time-Series-Library
github_2023
thuml
python
forward
def forward(self, coeffs): """ Args: coeffs (yl, yh): tuple of lowpass and bandpass coefficients, should match the format returned by DWT1DForward. Returns: Reconstructed input of shape :math:`(N, C_{in}, L_{in})` Note: Can have None for any of the highpass scales and will treat the values as zeros (not in an efficient way though). """ x0, highs = coeffs assert x0.ndim == 3, "Can only handle 3d inputs (N, C, L)" mode = mode_to_int(self.mode) # Do a multilevel inverse transform for x1 in highs[::-1]: if x1 is None: x1 = torch.zeros_like(x0) # 'Unpad' added signal if x0.shape[-1] > x1.shape[-1]: x0 = x0[..., :-1] x0 = SFB1D.apply(x0, x1, self.g0, self.g1, mode, self.use_amp) return x0
""" Args: coeffs (yl, yh): tuple of lowpass and bandpass coefficients, should match the format returned by DWT1DForward. Returns: Reconstructed input of shape :math:`(N, C_{in}, L_{in})` Note: Can have None for any of the highpass scales and will treat the values as zeros (not in an efficient way though). """
https://github.com/thuml/Time-Series-Library/blob/4ddf869d999424b037d451a4757e36813d66a13b/layers/DWT_Decomposition.py#L224-L249
4ddf869d999424b037d451a4757e36813d66a13b
tiktok-uploader
github_2023
wkaisertexas
python
get_auth_args
def get_auth_args(): """ Generates a parser which is used to get all of the authentication information """ parser = ArgumentParser( description='TikTok Auth is a program which can log you into multiple accounts sequentially' ) # authentication arguments parser.add_argument('-o', '--output', default='tmp', help='The output folder to save the cookies to') parser.add_argument('-i', '--input', help='A csv file with username and password') # parser.add_argument('-h', '--header', default=True, # help='The header of the csv file which contains the username and password') parser.add_argument('-u', '--username', help='Your TikTok email / username') parser.add_argument('-p', '--password', help='Your TikTok password') return parser.parse_args()
""" Generates a parser which is used to get all of the authentication information """
https://github.com/wkaisertexas/tiktok-uploader/blob/59dc97852b70a2f13be6a48c6046f089baa1054e/src/tiktok_uploader/cli.py#L112-L129
59dc97852b70a2f13be6a48c6046f089baa1054e
nastools
github_2023
mhdpdp
python
get_transfer_statistics
def get_transfer_statistics(self, data=None): """ 查询转移历史统计数据 """ MovieChartLabels = [] MovieNums = [] TvChartData = {} TvNums = [] AnimeNums = [] for statistic in self.dbhelper.get_transfer_statistics(): if statistic[0] == "电影": MovieChartLabels.append(statistic[1]) MovieNums.append(statistic[2]) else: if not TvChartData.get(statistic[1]): TvChartData[statistic[1]] = {"tv": 0, "anime": 0} if statistic[0] == "电视剧": TvChartData[statistic[1]]["tv"] += statistic[2] elif statistic[0] == "动漫": TvChartData[statistic[1]]["anime"] += statistic[2] TvChartLabels = list(TvChartData) for tv_data in TvChartData.values(): TvNums.append(tv_data.get("tv")) AnimeNums.append(tv_data.get("anime")) return { "code": 0, "MovieChartLabels": MovieChartLabels, "MovieNums": MovieNums, "TvChartLabels": TvChartLabels, "TvNums": TvNums, "AnimeNums": AnimeNums }
""" 查询转移历史统计数据 """
https://github.com/mhdpdp/nastools/blob/b9fa7cad74649fa4cd70e8bde378aa02a2d27c29/web/action.py#L3396-L3428
b9fa7cad74649fa4cd70e8bde378aa02a2d27c29
nastools
github_2023
mhdpdp
python
FilterRuleUpdate.post
@filterrule.doc(parser=parser) def post(self): """ Add or modify a rule """ return WebAction().api_action(cmd='add_filterrule', data=self.parser.parse_args())
""" 新增/修改规则 """
https://github.com/mhdpdp/nastools/blob/b9fa7cad74649fa4cd70e8bde378aa02a2d27c29/web/apiv1.py#L1803-L1808
b9fa7cad74649fa4cd70e8bde378aa02a2d27c29
PyMammotion
github_2023
mikey0000
python
DeviceType.is_luba1
@staticmethod def is_luba1(device_name: str, product_key: str = ""): """Check if the given device is of type LUBA. This function determines if the device specified by 'device_name' is of type LUBA. If 'product_key' is provided, it is used to further identify the device type. Args: device_name (str): The name of the device. product_key (str?): The product key associated with the device. Defaults to "". Returns: bool: True if the device is of type LUBA, False otherwise. """ if not product_key: device_type = DeviceType.value_of_str(device_name) else: device_type = DeviceType.value_of_str(device_name, product_key) return device_type.get_value() == DeviceType.LUBA.get_value()
"""Check if the given device is of type LUBA. This function determines if the device specified by 'device_name' is of type LUBA. If 'product_key' is provided, it is used to further identify the device type. Args: device_name (str): The name of the device. product_key (str?): The product key associated with the device. Defaults to "". Returns: bool: True if the device is of type LUBA, False otherwise. """
https://github.com/mikey0000/PyMammotion/blob/2c5e100b8cc50d393ff5ec460c612e1da5ec4935/pymammotion/utility/device_type.py#L127-L149
2c5e100b8cc50d393ff5ec460c612e1da5ec4935
GHOST
github_2023
dvl-tum
python
update
def update(oids, hids, dists, indices, events, m): """ tracks : my results of shape {tr_id: {'id', 'im_index', 'max_iou', 'bbox'} num_frames : number of frames """ import pandas as pd asso = dict() hypo = dict() cols = ['Type', 'id', 'frame', 'tr_id', 'iou', 'w', 'h'] events = pd.DataFrame(columns=cols) for i in range(num_frames): for tr_id, tr in self.tracks.items(): for t in tr: if t['im_index'] == i: if t['id'] in asso.keys(): h_o = asso[t['id']] if tr_id == h_o: TYPE = 'match' else: if h_o in hypo.keys(): TYPE = 'switch' else: TYPE = 'ascend' elif tr_id == -1: TYPE = 'fp' elif tr_id in hypo.keys(): if t['id'] not in asso.keys(): TYPE = 'migrate' elif hypo[tr_id] != t['id']: TYPE = 'transfer' df = pd.DataFrame([[TYPE, t['id'], i, tr_id, t['iou'], t['bbox'] [2]-t['bbox'][0], t['bbox'][3]-t['bbox'][1]]], columns=cols) #self.dirty_events = True oids = np.asarray(oids) oids_masked = np.zeros_like(oids, dtype=np.bool) hids = np.asarray(hids) hids_masked = np.zeros_like(hids, dtype=np.bool) dists = np.atleast_2d(dists).astype(float).reshape( oids.shape[0], hids.shape[0]).copy() if frameid is None: assert self.auto_id, 'auto-id is not enabled' if len(self._indices['FrameId']) > 0: frameid = self._indices['FrameId'][-1] + 1 else: frameid = 0 else: assert not self.auto_id, 'Cannot provide frame id when auto-id is enabled' eid = itertools.count() # 0. Record raw events no = len(oids) nh = len(hids) # Add a RAW event simply to ensure the frame is counted. indices.append(frameid, next(eid)) events.append('RAW', np.nan, np.nan, np.nan) # There must be at least one RAW event per object and hypothesis. # Record all finite distances as RAW events. valid_i, valid_j = np.where(np.isfinite(dists)) valid_dists = dists[valid_i, valid_j] for i, j, dist_ij in zip(valid_i, valid_j, valid_dists): indices.append(frameid, next(eid)) events.append('RAW', oids[i], hids[j], dist_ij) # Add a RAW event for objects and hypotheses that were present but did # not overlap with anything. used_i = np.unique(valid_i) used_j = np.unique(valid_j) unused_i = np.setdiff1d(np.arange(no), used_i) unused_j = np.setdiff1d(np.arange(nh), used_j) for oid in oids[unused_i]: indices.append(frameid, next(eid)) events.append('RAW', oid, np.nan, np.nan) for hid in hids[unused_j]: indices.append(frameid, next(eid)) events.append('RAW', np.nan, hid, np.nan) if oids.size * hids.size > 0: # 1. Try to re-establish tracks from previous correspondences for i in range(oids.shape[0]): # No need to check oids_masked[i] here. if oids[i] not in self.m: continue hprev = self.m[oids[i]] j, = np.where(~hids_masked & (hids == hprev)) if j.shape[0] == 0: continue j = j[0] if np.isfinite(dists[i, j]): o = oids[i] h = hids[j] oids_masked[i] = True hids_masked[j] = True self.m[oids[i]] = hids[j] indices.append(frameid, next(eid)) events.append('MATCH', oids[i], hids[j], dists[i, j]) self.last_match[o] = frameid self.hypHistory[h] = frameid # 2. 
Try to remaining objects/hypotheses dists[oids_masked, :] = np.nan dists[:, hids_masked] = np.nan rids, cids = linear_sum_assignment(dists) for i, j in zip(rids, cids): if not np.isfinite(dists[i, j]): continue o = oids[i] h = hids[j] is_switch = (o in self.m and self.m[o] != h and abs(frameid - self.last_occurrence[o]) <= self.max_switch_time) cat1 = 'SWITCH' if is_switch else 'MATCH' if cat1 == 'SWITCH': if h not in self.hypHistory: subcat = 'ASCEND' indices.append(frameid, next(eid)) events.append(subcat, oids[i], hids[j], dists[i, j]) # ignore the last condition temporarily is_transfer = (h in self.res_m and self.res_m[h] != o) # is_transfer = (h in self.res_m and # self.res_m[h] != o and # abs(frameid - self.last_occurrence[o]) <= self.max_switch_time) cat2 = 'TRANSFER' if is_transfer else 'MATCH' if cat2 == 'TRANSFER': if o not in self.last_match: subcat = 'MIGRATE' indices.append(frameid, next(eid)) events.append(subcat, oids[i], hids[j], dists[i, j]) indices.append(frameid, next(eid)) events.append(cat2, oids[i], hids[j], dists[i, j]) if vf != '' and (cat1 != 'MATCH' or cat2 != 'MATCH'): if cat1 == 'SWITCH': vf.write('%s %d %d %d %d %d\n' % ( subcat[:2], o, self.last_match[o], self.m[o], frameid, h)) if cat2 == 'TRANSFER': vf.write('%s %d %d %d %d %d\n' % ( subcat[:2], h, self.hypHistory[h], self.res_m[h], frameid, o)) self.hypHistory[h] = frameid self.last_match[o] = frameid indices.append(frameid, next(eid)) events.append(cat1, oids[i], hids[j], dists[i, j]) oids_masked[i] = True hids_masked[j] = True self.m[o] = h self.res_m[h] = o # 3. All remaining objects are missed for o in oids[~oids_masked]: indices.append(frameid, next(eid)) events.append('MISS', o, np.nan, np.nan) if vf != '': vf.write('FN %d %d\n' % (frameid, o)) # 4. All remaining hypotheses are false alarms for h in hids[~hids_masked]: indices.append(frameid, next(eid)) events.append('FP', np.nan, h, np.nan) if vf != '': vf.write('FP %d %d\n' % (frameid, h)) # 5. Update occurance state for o in oids: self.last_occurrence[o] = frameid return frameid
""" tracks : my results of shape {tr_id: {'id', 'im_index', 'max_iou', 'bbox'} num_frames : number of frames """
https://github.com/dvl-tum/GHOST/blob/755a5dacfcf4dd122a4cac73061b24e9c84f3c19/src/utils.py#L119-L294
755a5dacfcf4dd122a4cac73061b24e9c84f3c19
safari
github_2023
HazyResearch
python
checkpoint_filter_fn
def checkpoint_filter_fn(state_dict, model): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} if 'model' in state_dict: # For deit models state_dict = state_dict['model'] for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k and len(v.shape) < 4: # For old models that I trained prior to conv based patchification O, I, H, W = model.patch_embed.proj.weight.shape v = v.reshape(O, -1, H, W) elif k == 'pos_embed' and v.shape != model.pos_embed.shape: # To resize pos embedding when using model at different size from pretrained weights v = resize_pos_embed(v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) out_dict[k] = v return out_dict
""" convert patch embedding weight from manual patchify + linear proj to conv"""
https://github.com/HazyResearch/safari/blob/02220c69d247e5473616cd053a443ad99fd2559b/src/models/baselines/vit_all.py#L341-L357
02220c69d247e5473616cd053a443ad99fd2559b
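The key transformation in checkpoint_filter_fn above is reshaping an old linear patch-projection weight into a conv kernel; a minimal shape-only illustration (all sizes made up):

import torch

O, I, H, W = 768, 3, 16, 16              # out channels, in channels, patch height/width
old_weight = torch.randn(O, I * H * W)   # manual patchify + linear projection
conv_weight = old_weight.reshape(O, -1, H, W)
print(conv_weight.shape)                 # torch.Size([768, 3, 16, 16])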
safari
github_2023
HazyResearch
python
setup_filters
def setup_filters(self, filter_cls, filter_args): "Initializes the explicit and implicit filters" assert self.order >= 2, f'Order must be at least 2, (got {self.order})' total_width = self.d_model * self.inner_factor * (self.order + 1) self.short_filter = nn.Conv1d( in_channels=total_width, out_channels=total_width, kernel_size=self.short_filter_order, groups=total_width, padding=self.short_filter_order - 1 ) filter_cls = instantiate(registry.layer, filter_cls, partial=True) self.filter_fn = filter_cls( self.head_dim * self.inner_factor * (self.order - 1), order=self.filter_order, seq_len=self.l_max, channels=1, dropout=self.filter_dropout, **filter_args ) if self.jit_filter: self.filter_fn = torch.jit.script(self.filter_fn, self.L)
"Initializes the explicit and implicit filters"
https://github.com/HazyResearch/safari/blob/02220c69d247e5473616cd053a443ad99fd2559b/src/models/sequence/hyena.py#L280-L303
02220c69d247e5473616cd053a443ad99fd2559b
VAD
github_2023
hustvl
python
custom_weight_dir_reduce_loss
@mmcv.jit(derivate=True, coderize=True) def custom_weight_dir_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): num_sample, num_dir weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ # if weight is specified, apply element-wise weight if weight is not None: loss = loss * weight # if avg_factor is not specified, just reduce the loss if avg_factor is None: raise ValueError('avg_factor should not be none for OrderedPtsL1Loss') # loss = reduce_loss(loss, reduction) else: # if reduction is mean, then average the loss by avg_factor if reduction == 'mean': # import pdb;pdb.set_trace() # loss = loss.permute(1,0,2,3).contiguous() loss = loss.sum() loss = loss / avg_factor # if reduction is 'none', then do nothing, otherwise raise an error elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss
"""Apply element-wise weight and reduce loss. Args: loss (Tensor): num_sample, num_dir weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """
https://github.com/hustvl/VAD/blob/70bb364aa3f33316960da06053c0d168628fb15f/projects/mmdet3d_plugin/VAD/utils/CD_loss.py#L33-L64
70bb364aa3f33316960da06053c0d168628fb15f
unicom
github_2023
deepglint
python
add_model_config
def add_model_config(path): """add model config path or file and update registry""" if not isinstance(path, Path): path = Path(path) _MODEL_CONFIG_PATHS.append(path) _rescan_model_configs()
"""add model config path or file and update registry"""
https://github.com/deepglint/unicom/blob/7e503c908a1b9deb6cd2c2d6287500b95233884f/downstream/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/factory.py#L62-L67
7e503c908a1b9deb6cd2c2d6287500b95233884f
nas-tools
github_2023
receyuki
python
WebAction.set_config_value
@staticmethod def set_config_value(cfg, cfg_key, cfg_value): """ 根据Key设置配置值 """ # 密码 if cfg_key == "app.login_password": if cfg_value and not cfg_value.startswith("[hash]"): cfg['app']['login_password'] = "[hash]%s" % generate_password_hash( cfg_value) else: cfg['app']['login_password'] = cfg_value or "password" return cfg # 代理 if cfg_key == "app.proxies": if cfg_value: if not cfg_value.startswith("http") and not cfg_value.startswith("sock"): cfg['app']['proxies'] = { "https": "http://%s" % cfg_value, "http": "http://%s" % cfg_value} else: cfg['app']['proxies'] = {"https": "%s" % cfg_value, "http": "%s" % cfg_value} else: cfg['app']['proxies'] = {"https": None, "http": None} return cfg # 豆瓣用户列表 if cfg_key == "douban.users": vals = cfg_value.split(",") cfg['douban']['users'] = vals return cfg # 索引器 if cfg_key == "jackett.indexers": vals = cfg_value.split("\n") cfg['jackett']['indexers'] = vals return cfg # 最大支持三层赋值 keys = cfg_key.split(".") if keys: if len(keys) == 1: cfg[keys[0]] = cfg_value elif len(keys) == 2: if not cfg.get(keys[0]): cfg[keys[0]] = {} cfg[keys[0]][keys[1]] = cfg_value elif len(keys) == 3: if cfg.get(keys[0]): if not cfg[keys[0]].get(keys[1]) or isinstance(cfg[keys[0]][keys[1]], str): cfg[keys[0]][keys[1]] = {} cfg[keys[0]][keys[1]][keys[2]] = cfg_value else: cfg[keys[0]] = {} cfg[keys[0]][keys[1]] = {} cfg[keys[0]][keys[1]][keys[2]] = cfg_value return cfg
""" 根据Key设置配置值 """
https://github.com/receyuki/nas-tools/blob/e3a43d4f0896db49de02e9a9201ef2e5877af56f/web/action.py#L299-L353
e3a43d4f0896db49de02e9a9201ef2e5877af56f
ChatFred
github_2023
chrislemke
python
post
async def post(self) -> "MultiDictProxy[Union[str, bytes, FileField]]": """Return POST parameters.""" if self._post is not None: return self._post if self._method not in self.POST_METHODS: self._post = MultiDictProxy(MultiDict()) return self._post content_type = self.content_type if content_type not in ( "", "application/x-www-form-urlencoded", "multipart/form-data", ): self._post = MultiDictProxy(MultiDict()) return self._post out: MultiDict[Union[str, bytes, FileField]] = MultiDict() if content_type == "multipart/form-data": multipart = await self.multipart() max_size = self._client_max_size field = await multipart.next() while field is not None: size = 0 field_ct = field.headers.get(hdrs.CONTENT_TYPE) if isinstance(field, BodyPartReader): assert field.name is not None # Note that according to RFC 7578, the Content-Type header # is optional, even for files, so we can't assume it's # present. # https://tools.ietf.org/html/rfc7578#section-4.4 if field.filename: # store file in temp file tmp = tempfile.TemporaryFile() chunk = await field.read_chunk(size=2**16) while chunk: chunk = field.decode(chunk) tmp.write(chunk) size += len(chunk) if 0 < max_size < size: tmp.close() raise HTTPRequestEntityTooLarge( max_size=max_size, actual_size=size ) chunk = await field.read_chunk(size=2**16) tmp.seek(0) if field_ct is None: field_ct = "application/octet-stream" ff = FileField( field.name, field.filename, cast(io.BufferedReader, tmp), field_ct, field.headers, ) out.add(field.name, ff) else: # deal with ordinary data value = await field.read(decode=True) if field_ct is None or field_ct.startswith("text/"): charset = field.get_charset(default="utf-8") out.add(field.name, value.decode(charset)) else: out.add(field.name, value) size += len(value) if 0 < max_size < size: raise HTTPRequestEntityTooLarge( max_size=max_size, actual_size=size ) else: raise ValueError( "To decode nested multipart you need " "to use custom reader", ) field = await multipart.next() else: data = await self.read() if data: charset = self.charset or "utf-8" out.extend( parse_qsl( data.rstrip().decode(charset), keep_blank_values=True, encoding=charset, ) ) self._post = MultiDictProxy(out) return self._post
"""Return POST parameters."""
https://github.com/chrislemke/ChatFred/blob/4356986c2cc3eaf57b5329774c9e593c01554789/workflow/src/libs/aiohttp/web_request.py#L677-L771
4356986c2cc3eaf57b5329774c9e593c01554789
ChatFred
github_2023
chrislemke
python
add_put
def add_put(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: """Shortcut for add_route with method PUT.""" return self.add_route(hdrs.METH_PUT, path, handler, **kwargs)
"""Shortcut for add_route with method PUT."""
https://github.com/chrislemke/ChatFred/blob/4356986c2cc3eaf57b5329774c9e593c01554789/workflow/src/libs/aiohttp/web_urldispatcher.py#L1168-L1170
4356986c2cc3eaf57b5329774c9e593c01554789
ChatFred
github_2023
chrislemke
python
create_memory_object_stream
def create_memory_object_stream( max_buffer_size: float = 0, item_type: type[T_Item] | None = None ) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]: """ Create a memory object stream. :param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking :param item_type: type of item, for marking the streams with the right generic type for static typing (not used at run time) :return: a tuple of (send stream, receive stream) """ if max_buffer_size != math.inf and not isinstance(max_buffer_size, int): raise ValueError("max_buffer_size must be either an integer or math.inf") if max_buffer_size < 0: raise ValueError("max_buffer_size cannot be negative") state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size) return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state)
""" Create a memory object stream. :param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking :param item_type: type of item, for marking the streams with the right generic type for static typing (not used at run time) :return: a tuple of (send stream, receive stream) """
https://github.com/chrislemke/ChatFred/blob/4356986c2cc3eaf57b5329774c9e593c01554789/workflow/src/libs/anyio/_core/_streams.py#L29-L47
4356986c2cc3eaf57b5329774c9e593c01554789
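A minimal usage sketch of the memory object stream API shown above, written against the public anyio package and assuming the vendored copy behaves like upstream anyio:

import anyio

async def main():
    send, receive = anyio.create_memory_object_stream(max_buffer_size=1)
    async with send, receive:
        await send.send("hello")        # fits in the buffer, so it does not block
        print(await receive.receive())  # -> hello

anyio.run(main)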
ChatFred
github_2023
chrislemke
python
_colors_to_code
def _colors_to_code(self, fg_color: str, bg_color: str) -> Iterable[str]: """ Return a tuple with the vt100 values that represent this color. """ # When requesting ANSI colors only, and both fg/bg color were converted # to ANSI, ensure that the foreground and background color are not the # same. (Unless they were explicitly defined to be the same color.) fg_ansi = "" def get(color: str, bg: bool) -> list[int]: nonlocal fg_ansi table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS if not color or self.color_depth == ColorDepth.DEPTH_1_BIT: return [] # 16 ANSI colors. (Given by name.) elif color in table: return [table[color]] # RGB colors. (Defined as 'ffffff'.) else: try: rgb = self._color_name_to_rgb(color) except ValueError: return [] # When only 16 colors are supported, use that. if self.color_depth == ColorDepth.DEPTH_4_BIT: if bg: # Background. if fg_color != bg_color: exclude = [fg_ansi] else: exclude = [] code, name = _16_bg_colors.get_code(rgb, exclude=exclude) return [code] else: # Foreground. code, name = _16_fg_colors.get_code(rgb) fg_ansi = name return [code] # True colors. (Only when this feature is enabled.) elif self.color_depth == ColorDepth.DEPTH_24_BIT: r, g, b = rgb return [(48 if bg else 38), 2, r, g, b] # 256 RGB colors. else: return [(48 if bg else 38), 5, _256_colors[rgb]] result: list[int] = [] result.extend(get(fg_color, False)) result.extend(get(bg_color, True)) return map(str, result)
""" Return a tuple with the vt100 values that represent this color. """
https://github.com/chrislemke/ChatFred/blob/4356986c2cc3eaf57b5329774c9e593c01554789/workflow/src/libs/prompt_toolkit/output/vt100.py#L319-L374
4356986c2cc3eaf57b5329774c9e593c01554789
ChatFred
github_2023
chrislemke
python
extract_from_urllib3
def extract_from_urllib3(): """ Undo monkey-patching by :func:`inject_into_urllib3`. """ util.SSLContext = orig_util_SSLContext util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI util.IS_SECURETRANSPORT = False util.ssl_.IS_SECURETRANSPORT = False
""" Undo monkey-patching by :func:`inject_into_urllib3`. """
https://github.com/chrislemke/ChatFred/blob/4356986c2cc3eaf57b5329774c9e593c01554789/workflow/src/libs/urllib3/contrib/securetransport.py#L201-L210
4356986c2cc3eaf57b5329774c9e593c01554789
ChatFred
github_2023
chrislemke
python
Url.hostname
@property def hostname(self): """For backwards-compatibility with urlparse. We're nice like that.""" return self.host
"""For backwards-compatibility with urlparse. We're nice like that."""
https://github.com/chrislemke/ChatFred/blob/4356986c2cc3eaf57b5329774c9e593c01554789/workflow/src/libs/urllib3/util/url.py#L109-L112
4356986c2cc3eaf57b5329774c9e593c01554789
catalyst
github_2023
PennyLaneAI
python
test_decomposition_nested
def test_decomposition_nested(self): """Tests decompositions of nested controlled operations""" ctrl_op = C_ctrl(C_ctrl(lambda: qml.RZ(0.123, wires=0), control=1), control=2)() expected = [ qml.ops.Controlled(qml.RZ(0.123, wires=0), control_wires=[1, 2]), ] assert ctrl_op.decomposition() == expected
"""Tests decompositions of nested controlled operations"""
https://github.com/PennyLaneAI/catalyst/blob/729d468ad1bec692242c6b20560a2b9922debb31/frontend/test/pytest/test_quantum_control.py#L1654-L1661
729d468ad1bec692242c6b20560a2b9922debb31
catalyst
github_2023
PennyLaneAI
python
test_pattern_matching_optimization
@pytest.mark.xfail( reason="QJIT fails with ValueError: Eagerly computing the adjoint (lazy=False) is only supported on single operators." ) def test_pattern_matching_optimization(backend): """Test pattern_matching_optimization""" def qnode_builder(device_name): """Builder""" ops = [qml.S(0), qml.S(0), qml.Z(0)] pattern = qml.tape.QuantumTape(ops) @partial(qml.transforms.pattern_matching_optimization, pattern_tapes=[pattern]) @qml.qnode(qml.device(device_name, wires=5)) def qfunc(): qml.S(wires=0) qml.Z(0) qml.S(wires=1) qml.CZ(wires=[0, 1]) qml.S(wires=1) qml.S(wires=2) qml.CZ(wires=[1, 2]) qml.S(wires=2) return qml.expval(qml.X(0)) return qfunc qnode_control = qnode_builder("default.qubit") qnode_backend = qnode_builder(backend) jax_jit = jax.jit(qnode_control) compiled = qjit(qnode_backend) expected = jax_jit() observed = compiled() _, expected_shape = jax.tree_util.tree_flatten(expected) _, observed_shape = jax.tree_util.tree_flatten(observed) assert np.allclose(expected, observed) assert expected_shape == observed_shape
"""Test pattern_matching_optimization"""
https://github.com/PennyLaneAI/catalyst/blob/729d468ad1bec692242c6b20560a2b9922debb31/frontend/test/pytest/test_transform.py#L1340-L1379
729d468ad1bec692242c6b20560a2b9922debb31
ControlLoRA
github_2023
HighCWu
python
forward
def forward(self, query, key, value, query_pos=None, key_pos=None, attn_masks=None, query_key_padding_mask=None, key_padding_mask=None, **kwargs): """Forward function for `TransformerCoder`. Args: query (Tensor): Input query with shape `(num_queries, bs, embed_dims)`. key (Tensor): The key tensor with shape `(num_keys, bs, embed_dims)`. value (Tensor): The value tensor with shape `(num_keys, bs, embed_dims)`. query_pos (Tensor): The positional encoding for `query`. Default: None. key_pos (Tensor): The positional encoding for `key`. Default: None. attn_masks (List[Tensor], optional): Each element is 2D Tensor which is used in calculation of corresponding attention in operation_order. Default: None. query_key_padding_mask (Tensor): ByteTensor for `query`, with shape [bs, num_queries]. Only used in self-attention Default: None. key_padding_mask (Tensor): ByteTensor for `query`, with shape [bs, num_keys]. Default: None. Returns: Tensor: results with shape [num_queries, bs, embed_dims]. """ for layer in self.layers: query = layer( query, key, value, query_pos=query_pos, key_pos=key_pos, attn_masks=attn_masks, query_key_padding_mask=query_key_padding_mask, key_padding_mask=key_padding_mask, **kwargs) return query
"""Forward function for `TransformerCoder`. Args: query (Tensor): Input query with shape `(num_queries, bs, embed_dims)`. key (Tensor): The key tensor with shape `(num_keys, bs, embed_dims)`. value (Tensor): The value tensor with shape `(num_keys, bs, embed_dims)`. query_pos (Tensor): The positional encoding for `query`. Default: None. key_pos (Tensor): The positional encoding for `key`. Default: None. attn_masks (List[Tensor], optional): Each element is 2D Tensor which is used in calculation of corresponding attention in operation_order. Default: None. query_key_padding_mask (Tensor): ByteTensor for `query`, with shape [bs, num_queries]. Only used in self-attention Default: None. key_padding_mask (Tensor): ByteTensor for `query`, with shape [bs, num_keys]. Default: None. Returns: Tensor: results with shape [num_queries, bs, embed_dims]. """
https://github.com/HighCWu/ControlLoRA/blob/a6891215fc587af326ab1234718491741a5c2015/annotator/uniformer/mmcv/cnn/bricks/transformer.py#L549-L595
a6891215fc587af326ab1234718491741a5c2015
axlearn
github_2023
apple
python
CausalLmModelBuilder.v1_from_args
@classmethod def v1_from_args( cls, vocab_size: int, *, num_layers: int, hidden_dim: int, num_heads: int ) -> CausalLM: """Build a v1 Causal LM. Args: vocab_size: The vocabulary size. num_layers: The number of transformer layers. hidden_dim: The model hidden dimension. num_heads: THe number of attention heads. Returns: Initialized model. """ model = CausalLM( decoder=Decoder( attention_mask=ALiBiAttentionLogitBiasLayer(num_heads), emb=TransformerEmbeddings(Embedding(vocab_size, embedding_dim=hidden_dim)), transformer=StackedTransformerLayer( num_layers, layer=TransformerLayer( self_attention=TransformerAttentionLayer( target_dim=hidden_dim, source_dim=hidden_dim, num_heads=num_heads, structure="prenorm", norm="rmsnorm", qkv_linear_cls=FusedQKVLinear, linear_biases=False, ), feed_forward=TransformerFeedForwardLayer( input_dim=hidden_dim, hidden_dim=round(hidden_dim * (21.0 / 8.0)), activation=( _torch_activation_fn("nn.silu"), _torch_activation_fn("linear"), ), structure="prenorm", norm="rmsnorm", linear_biases=False, ), ), ), output_norm="rmsnorm", ) ) return model
"""Build a v1 Causal LM. Args: vocab_size: The vocabulary size. num_layers: The number of transformer layers. hidden_dim: The model hidden dimension. num_heads: THe number of attention heads. Returns: Initialized model. """
https://github.com/apple/axlearn/blob/b9551871eae1b887a55b1e7d682cc6db7a51bf1e/axlearn/common/adapter_torch.py#L2061-L2108
b9551871eae1b887a55b1e7d682cc6db7a51bf1e
axlearn
github_2023
apple
python
__init__
def __init__( self, *, input_size: tuple[int, int], num_masking_patches: int, num_attempts: int = 10, min_mask_patches: int = 16, min_aspect: float = 0.3, max_mask_patches: Optional[int] = None, max_aspect: Optional[float] = None, ): """Initializes MaskingGenerator. Args: input_size: an int tuple that represents (height, width) of the patchified target. num_masking_patches: the number of patches to be masked. num_attempts: the max number of attempts for one mask generation trial. min_mask_patches: the min number of patches for one masking area. max_mask_patches: the max number of patches for one masking area. If None, sets to num_masking_patches. min_aspect: the min aspect ratio (height/width) for one masking area. max_aspect: the max aspect ratio for one masking area. If None, sets to 1 / min_aspect. Raises: ValueError: if min_aspect or max_aspect are below 0 or max_aspect is smaller than min_aspect. """ self.height, self.width = input_size # Total number of patches in the pachified input. self.num_patches = self.height * self.width self.num_masking_patches = num_masking_patches self.num_attempts = num_attempts self.min_mask_patches = min_mask_patches self.max_mask_patches = ( num_masking_patches if max_mask_patches is None else max_mask_patches ) max_aspect = max_aspect or 1 / min_aspect if min_aspect <= 0 or max_aspect <= 0: raise ValueError("Both min and max aspect ratios need to be positive.") if min_aspect > max_aspect: raise ValueError("min_aspect needs to be no greater than max_aspect.") self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
"""Initializes MaskingGenerator. Args: input_size: an int tuple that represents (height, width) of the patchified target. num_masking_patches: the number of patches to be masked. num_attempts: the max number of attempts for one mask generation trial. min_mask_patches: the min number of patches for one masking area. max_mask_patches: the max number of patches for one masking area. If None, sets to num_masking_patches. min_aspect: the min aspect ratio (height/width) for one masking area. max_aspect: the max aspect ratio for one masking area. If None, sets to 1 / min_aspect. Raises: ValueError: if min_aspect or max_aspect are below 0 or max_aspect is smaller than min_aspect. """
https://github.com/apple/axlearn/blob/b9551871eae1b887a55b1e7d682cc6db7a51bf1e/axlearn/vision/mask_generator.py#L30-L71
b9551871eae1b887a55b1e7d682cc6db7a51bf1e
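A minimal usage sketch of the constructor above, assuming the class is importable as axlearn.vision.mask_generator.MaskingGenerator (the file referenced by the URL); the patch grid and mask count are hypothetical.

from axlearn.vision.mask_generator import MaskingGenerator

# Mask 75 of the 14 x 14 = 196 patches of a ViT-style patchified input.
generator = MaskingGenerator(input_size=(14, 14), num_masking_patches=75)
assert generator.num_patches == 196
assert generator.max_mask_patches == 75  # defaults to num_masking_patches when not given
# log_aspect_ratio stores (log(0.3), log(1 / 0.3)) for sampling masking-block shapes.
print(generator.log_aspect_ratio)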
axlearn
github_2023
apple
python
MobileNets.endpoints_dims
@property
def endpoints_dims(self) -> dict[str, int]:
    """A dict of {endpoint: dim} specifies dimension of intermediate representations."""
    return self._endpoints_dims
"""A dict of {endpoint: dim} specifies dimension of intermediate representations."""
https://github.com/apple/axlearn/blob/b9551871eae1b887a55b1e7d682cc6db7a51bf1e/axlearn/vision/mobilenets.py#L514-L517
b9551871eae1b887a55b1e7d682cc6db7a51bf1e
rl4co
github_2023
ai4co
python
spatial_encoding
def spatial_encoding(td: TensorDict):
    """We use a spatial encoding as proposed in Graphormer (https://arxiv.org/abs/2106.05234).

    The spatial encoding in Graphormer determines the distance of the shortest path between
    nodes i and j and uses a special value for node pairs that cannot be connected at all.

    For any two operations i<j of the same job, we determine the number of operations to be
    completed when starting at i before j can be started (e.g. i=3 and j=5 -> e=2), and for i>j
    the negative number of operations that, starting from j, have been completed before arriving
    at i (e.g. i=5 j=3 -> e=-2). For i=j we set e=0, as well as for operations of different jobs.

    :param td: TensorDict containing (among others) "ops_job_map" of shape [bs, n_ops],
        specifying the index of the job each operation belongs to
    :return torch.Tensor[bs, n_ops, n_ops]: length of the shortest path between any two operations
    """
    bs, _, n_total_ops = td["job_ops_adj"].shape
    max_ops_per_job = int(td["job_ops_adj"].sum(-1).max())
    ops_job_map = td["ops_job_map"]
    pad_mask = td["pad_mask"]

    same_job = (ops_job_map[:, None] == ops_job_map[..., None]).to(torch.int32)
    # mask padded
    same_job[pad_mask.unsqueeze(2).expand_as(same_job)] = 0
    same_job[pad_mask.unsqueeze(1).expand_as(same_job)] = 0

    # take upper triangular of same_job and set diagonal to zero for counting purposes
    upper_tri = torch.triu(same_job) - torch.diag(
        torch.ones(n_total_ops, device=td.device)
    )[None].expand_as(same_job)
    # cumsum and masking of operations that do not belong to the same job
    num_jumps = upper_tri.cumsum(2) * upper_tri
    # mirror the matrix
    num_jumps = num_jumps + num_jumps.transpose(1, 2)
    # NOTE: shifted this logic into the spatial encoding module
    # num_jumps = num_jumps + (-num_jumps.transpose(1,2))
    assert not torch.any(num_jumps >= max_ops_per_job)
    # special value for ops of different jobs and self-loops
    num_jumps = torch.where(num_jumps == 0, -1, num_jumps)
    self_mask = torch.eye(n_total_ops).repeat(bs, 1, 1).bool()
    num_jumps[self_mask] = 0
    return num_jumps
"""We use a spatial encoing as proposed in GraphFormer (https://arxiv.org/abs/2106.05234) The spatial encoding in GraphFormer determines the distance of the shortest path between and nodes i and j and uses a special value for node pairs that cannot be connected at all. For any two operations i<j of the same job, we determine the number of operations to be completet when starting at i before j can be started (e.g. i=3 and j=5 -> e=2) and for i>j the negative number of operations that starting from j, have been completet before arriving at i (e.g. i=5 j=3 -> e=-2). For i=j we set e=0 as well as for operations of different jobs. :param torch.Tensor[bs, n_ops] ops_job_map: tensor specifying the index of its corresponding job :return torch.Tensor[bs, n_ops, n_ops]: length of shortest path between any two operations """
https://github.com/ai4co/rl4co/blob/643ef99d118ce535e615a9441838782a2decf412/rl4co/envs/scheduling/fjsp/utils.py#L157-L193
643ef99d118ce535e615a9441838782a2decf412
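A standalone sketch of the counting trick used above, rewritten without the TensorDict wrapper so it runs with plain torch; the instance (two jobs with 3 and 2 operations, no padding) is made up for illustration.

import torch

ops_job_map = torch.tensor([[0, 0, 0, 1, 1]])  # [bs=1, n_ops=5]: job index of each operation
n_ops = ops_job_map.shape[1]

same_job = (ops_job_map[:, None] == ops_job_map[..., None]).long()
upper_tri = torch.triu(same_job) - torch.eye(n_ops, dtype=torch.long)[None]
num_jumps = upper_tri.cumsum(2) * upper_tri             # operations between i and j for i < j
num_jumps = num_jumps + num_jumps.transpose(1, 2)       # mirror to i > j
num_jumps = torch.where(num_jumps == 0, -1, num_jumps)  # special value for different jobs
num_jumps[torch.eye(n_ops).bool().unsqueeze(0)] = 0     # e = 0 on the diagonal
print(num_jumps[0])
# tensor([[ 0,  1,  2, -1, -1],
#         [ 1,  0,  1, -1, -1],
#         [ 2,  1,  0, -1, -1],
#         [-1, -1, -1,  0,  1],
#         [-1, -1, -1,  1,  0]])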
SubpopBench
github_2023
YyzHarry
python
_create_examples
def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
        guid = "%s-%s" % (set_type, i)
        text_a = line[3]
        label = line[1]
        examples.append(
            InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
"""Creates examples for the training and dev sets."""
https://github.com/YyzHarry/SubpopBench/blob/4d3dbbe21029666ef19d040e110ec22908640c5b/utils_glue.py#L183-L195
4d3dbbe21029666ef19d040e110ec22908640c5b
semantic-kernel
github_2023
microsoft
python
_get_database_proxy
async def _get_database_proxy(self, **kwargs) -> DatabaseProxy:
    """Gets the database proxy."""
    try:
        if await self._does_database_exist():
            return self.cosmos_client.get_database_client(self.database_name)

        if self.create_database:
            return await self.cosmos_client.create_database(self.database_name, **kwargs)

        raise VectorStoreOperationException(f"Database '{self.database_name}' does not exist.")
    except Exception as e:
        raise VectorStoreOperationException(
            f"Failed to get database proxy for '{self.database_name}'."
        ) from e
"""Gets the database proxy."""
https://github.com/microsoft/semantic-kernel/blob/cd84e877980187e62d86bb5bc6086d264e62ee83/python/semantic_kernel/connectors/memory/azure_cosmos_db/azure_cosmos_db_no_sql_base.py#L101-L111
cd84e877980187e62d86bb5bc6086d264e62ee83
semantic-kernel
github_2023
microsoft
python
_get_underlying_type
def _get_underlying_type(annotation: Any) -> Any:
    """Get the underlying type of the annotation."""
    if isinstance(annotation, types.UnionType):
        return _get_non_none_type(annotation.__args__)
    if hasattr(annotation, "__origin__"):
        if annotation.__origin__ is Union:
            return _get_non_none_type(get_args(annotation))
        if isinstance(annotation.__origin__, types.UnionType):
            return _get_non_none_type(annotation.__origin__.__args__)
        return annotation.__origin__
    return annotation
"""Get the underlying type of the annotation."""
https://github.com/microsoft/semantic-kernel/blob/cd84e877980187e62d86bb5bc6086d264e62ee83/python/semantic_kernel/functions/kernel_function_decorator.py#L88-L102
cd84e877980187e62d86bb5bc6086d264e62ee83
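A self-contained sketch of how this kind of helper behaves on a few annotations. The _get_non_none_type below is a stand-in written for this example (the real one lives elsewhere in kernel_function_decorator.py), and the PEP 604 union case needs Python 3.10+.

import types
from typing import Any, Optional, Union, get_args

def _get_non_none_type(args) -> Any:
    # Stand-in: return the first argument that is not NoneType.
    return next(arg for arg in args if arg is not type(None))

def _get_underlying_type(annotation: Any) -> Any:
    # Copied from the record above for demonstration.
    if isinstance(annotation, types.UnionType):
        return _get_non_none_type(annotation.__args__)
    if hasattr(annotation, "__origin__"):
        if annotation.__origin__ is Union:
            return _get_non_none_type(get_args(annotation))
        if isinstance(annotation.__origin__, types.UnionType):
            return _get_non_none_type(annotation.__origin__.__args__)
        return annotation.__origin__
    return annotation

assert _get_underlying_type(int) is int            # plain type
assert _get_underlying_type(Optional[str]) is str  # Union[..., None]
assert _get_underlying_type(int | None) is int     # PEP 604 union (Python 3.10+)
assert _get_underlying_type(list[int]) is list     # generic alias -> origin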
semantic-kernel
github_2023
microsoft
python
get_collections
async def get_collections(self) -> list[str]:
    """Nullifies behavior of SemanticTextMemoryBase get_collections."""
    return []
"""Nullifies behavior of SemanticTextMemoryBase get_collections."""
https://github.com/microsoft/semantic-kernel/blob/cd84e877980187e62d86bb5bc6086d264e62ee83/python/semantic_kernel/memory/null_memory.py#L49-L51
cd84e877980187e62d86bb5bc6086d264e62ee83
semantic-kernel
github_2023
microsoft
python
azure_cognitive_search_memory_store
@pytest.fixture
def azure_cognitive_search_memory_store(azure_ai_search_unit_test_env):
    """Fixture to instantiate AzureCognitiveSearchMemoryStore with basic configuration."""
    return AzureCognitiveSearchMemoryStore(
        1536, "https://test.search.windows.net", azure_credentials=AzureKeyCredential("test_key")
    )
"""Fixture to instantiate AzureCognitiveSearchMemoryStore with basic configuration."""
https://github.com/microsoft/semantic-kernel/blob/cd84e877980187e62d86bb5bc6086d264e62ee83/python/tests/unit/memory/test_azure_cognitive_search_memory_store_unit_tests.py#L13-L18
cd84e877980187e62d86bb5bc6086d264e62ee83
StableDiffusionReconstruction
github_2023
yu-takagi
python
forward
def forward(self, *xs):
    """Forward pass.

    Returns:
        tensor: output
    """
    output = xs[0]

    if len(xs) == 2:
        res = self.resConfUnit1(xs[1])
        output = self.skip_add.add(output, res)
        # output += res

    output = self.resConfUnit2(output)

    output = nn.functional.interpolate(
        output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
    )

    output = self.out_conv(output)

    return output
"""Forward pass. Returns: tensor: output """
https://github.com/yu-takagi/StableDiffusionReconstruction/blob/e187d4b3db1d647ee3e1b4256a2068ffd15df683/codes/diffusion_sd2/stablediffusion/ldm/modules/midas/midas/blocks.py#L320-L341
e187d4b3db1d647ee3e1b4256a2068ffd15df683
SillyTavern-Extras
github_2023
SillyTavern
python
posedict_to_pose
def posedict_to_pose(posedict: Dict[str, float]) -> List[float]:
    """Convert a posedict (from an emotion JSON) into a list of morph values (in the order the models expect them)."""
    # sanity check
    unrecognized_keys = set(posedict.keys()) - set(posedict_keys)
    if unrecognized_keys:
        logger.warning(f"posedict_to_pose: ignoring unrecognized keys in posedict: {unrecognized_keys}")
    # Missing keys are fine - keys for zero values can simply be omitted.
    pose = [0.0 for i in range(len(posedict_keys))]
    for idx, key in enumerate(posedict_keys):
        pose[idx] = posedict.get(key, 0.0)
    return pose
"""Convert a posedict (from an emotion JSON) into a list of morph values (in the order the models expect them)."""
https://github.com/SillyTavern/SillyTavern-Extras/blob/fdc1ec04b632b1d871cc0ad3a9aa132e985fc398/talkinghead/tha3/app/util.py#L118-L129
fdc1ec04b632b1d871cc0ad3a9aa132e985fc398
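An illustrative, standalone version of the same mapping with a hypothetical three-entry posedict_keys list (the real list is defined elsewhere in talkinghead/tha3/app/util.py); only the dict-to-ordered-list behavior is shown.

# Hypothetical morph-key order; the real posedict_keys has many more entries.
posedict_keys = ["eyebrow_troubled_left_index", "eye_wink_left_index", "mouth_aaa_index"]

posedict = {"mouth_aaa_index": 0.5}  # keys for zero values can simply be omitted
pose = [posedict.get(key, 0.0) for key in posedict_keys]
assert pose == [0.0, 0.0, 0.5]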
BiFormer
github_2023
rayleizhu
python
to_tensor
def to_tensor(self, dtype, device):
    """See :func:`BaseInstanceMasks.to_tensor`."""
    if len(self.masks) == 0:
        return torch.empty((0, self.height, self.width), dtype=dtype, device=device)
    ndarray_masks = self.to_ndarray()
    return torch.tensor(ndarray_masks, dtype=dtype, device=device)
"""See :func:`BaseInstanceMasks.to_tensor`."""
https://github.com/rayleizhu/BiFormer/blob/1697bbbeafb8680524898f1dcaac10defd0604be/object_detection/mmdet/core/mask/structures.py#L881-L888
1697bbbeafb8680524898f1dcaac10defd0604be
BiFormer
github_2023
rayleizhu
python
__call__
def __call__(self, results):
    """Call function to load proposals from file.

    Args:
        results (dict): Result dict from :obj:`mmdet.CustomDataset`.

    Returns:
        dict: The dict contains loaded proposal annotations.
    """
    proposals = results['proposals']
    if proposals.shape[1] not in (4, 5):
        raise AssertionError(
            'proposals should have shapes (n, 4) or (n, 5), '
            f'but found {proposals.shape}')
    proposals = proposals[:, :4]

    if self.num_max_proposals is not None:
        proposals = proposals[:self.num_max_proposals]

    if len(proposals) == 0:
        proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
    results['proposals'] = proposals
    results['bbox_fields'].append('proposals')
    return results
"""Call function to load proposals from file. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded proposal annotations. """
https://github.com/rayleizhu/BiFormer/blob/1697bbbeafb8680524898f1dcaac10defd0604be/object_detection/mmdet/datasets/pipelines/loading.py#L401-L425
1697bbbeafb8680524898f1dcaac10defd0604be
BiFormer
github_2023
rayleizhu
python
forward
def forward(self, x):
    """Forward function."""
    if self.num_branches == 1:
        return [self.branches[0](x[0])]

    for i in range(self.num_branches):
        x[i] = self.branches[i](x[i])

    x_fuse = []
    for i in range(len(self.fuse_layers)):
        y = 0
        for j in range(self.num_branches):
            if i == j:
                y += x[j]
            else:
                y += self.fuse_layers[i][j](x[j])
        x_fuse.append(self.relu(y))
    return x_fuse
"""Forward function."""
https://github.com/rayleizhu/BiFormer/blob/1697bbbeafb8680524898f1dcaac10defd0604be/object_detection/mmdet/models/backbones/hrnet.py#L177-L194
1697bbbeafb8680524898f1dcaac10defd0604be
BiFormer
github_2023
rayleizhu
python
varifocal_loss
@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred,
                   target,
                   weight=None,
                   alpha=0.75,
                   gamma=2.0,
                   iou_weighted=True,
                   reduction='mean',
                   avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of classes.
        weight (torch.Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive example with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    # pred and target should be of the same size
    assert pred.size() == target.size()
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    if iou_weighted:
        focal_weight = target * (target > 0.0).float() + \
            alpha * (pred_sigmoid - target).abs().pow(gamma) * \
            (target <= 0.0).float()
    else:
        focal_weight = (target > 0.0).float() + \
            alpha * (pred_sigmoid - target).abs().pow(gamma) * \
            (target <= 0.0).float()
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """
https://github.com/rayleizhu/BiFormer/blob/1697bbbeafb8680524898f1dcaac10defd0604be/object_detection/mmdet/models/losses/varifocal_loss.py#L9-L55
1697bbbeafb8680524898f1dcaac10defd0604be
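A tiny numeric sketch of the iou-weighted focal weight computed above; the logits and targets are made up for illustration.

import torch

alpha, gamma = 0.75, 2.0
pred = torch.tensor([2.0, -1.0])    # logits: one positive bin, one negative bin
target = torch.tensor([0.9, 0.0])   # iou-aware target: 0.9 for the positive bin, 0 otherwise
pred_sigmoid = pred.sigmoid()

focal_weight = (
    target * (target > 0.0).float()
    + alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float()
)
# The positive bin keeps its iou target (0.9); the negative bin is down-weighted to
# alpha * |sigmoid(-1.0) - 0|**gamma ≈ 0.75 * 0.2689**2 ≈ 0.054.
print(focal_weight)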
PegasusSimulator
github_2023
PegasusSimulator
python
PeopleManager.get_people_manager
@staticmethod
def get_people_manager():
    """
    Method that returns the current people manager.
    """
    return PeopleManager()
""" Method that returns the current people manager. """
https://github.com/PegasusSimulator/PegasusSimulator/blob/31367ff57d2c44eb39d8fc7fced5044a24a930eb/extensions/pegasus.simulator/pegasus/simulator/logic/people_manager.py#L59-L64
31367ff57d2c44eb39d8fc7fced5044a24a930eb
foundry-dev-tools
github_2023
emdgroup
python
_convert_old_conf
def _convert_old_conf(console: Console, path: Path) -> str:
    """Reads old config file and converts it to the v2 toml."""
    cp = ConfigParser()
    cp.read_string(path.read_text())
    if not cp.has_section("default"):
        console.print(f"The config file {path!s} does not contain any configuration, nothing to migrate.")
        sys.exit(1)
    v1_config = dict(cp.items("default"))
    v2_config = v1_to_v2_config_dict(v1_config, env=False, get_config=False)
    v2_config_toml = tomli_w.dumps(v2_config)
    console.print(
        Markdown(f"Your v1 project config converted to the v2 format looks like this:\n```toml\n{v2_config_toml}\n```"),
    )
    return v2_config_toml
"""Reads old config file and converts it to the v2 toml."""
https://github.com/emdgroup/foundry-dev-tools/blob/605e8c1d810dc67f45cefad7edc3f368f39b0d2c/libs/foundry-dev-tools/src/foundry_dev_tools/cli/config.py#L180-L194
605e8c1d810dc67f45cefad7edc3f368f39b0d2c
mmrotate-dcfl
github_2023
Chasel-Tsui
python
_resize_seg
def _resize_seg(self, results):
    """Resize semantic segmentation map with ``results['scale']``."""
    for key in results.get('seg_fields', []):
        if self.keep_ratio:
            gt_seg = mmcv.imrescale(
                results[key],
                results['scale'],
                interpolation='nearest',
                backend=self.backend)
        else:
            gt_seg = mmcv.imresize(
                results[key],
                results['scale'],
                interpolation='nearest',
                backend=self.backend)
        results[key] = gt_seg
"""Resize semantic segmentation map with ``results['scale']``."""
https://github.com/Chasel-Tsui/mmrotate-dcfl/blob/d60ca27234a3276a4ca714b5ad616366a4bbdd9a/mmdet/datasets/pipelines/transforms.py#L268-L283
d60ca27234a3276a4ca714b5ad616366a4bbdd9a
mmrotate-dcfl
github_2023
Chasel-Tsui
python
simple_test
def simple_test(self, img, img_metas, rescale=False):
    """Test function without test time augmentation.

    Args:
        img (torch.Tensor): Input image.
        img_metas (list[dict]): List of image information.
        rescale (bool, optional): Whether to rescale the results.
            Defaults to False.

    Returns:
        list[np.ndarray]: proposals
    """
    x = self.extract_feat(img)
    # get origin input shape to onnx dynamic input shape
    if torch.onnx.is_in_onnx_export():
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
    proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
    if rescale:
        for proposals, meta in zip(proposal_list, img_metas):
            proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
    if torch.onnx.is_in_onnx_export():
        return proposal_list

    return [proposal.cpu().numpy() for proposal in proposal_list]
"""Test function without test time augmentation. Args: imgs (list[torch.Tensor]): List of multiple images img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[np.ndarray]: proposals """
https://github.com/Chasel-Tsui/mmrotate-dcfl/blob/d60ca27234a3276a4ca714b5ad616366a4bbdd9a/mmdet/models/detectors/rpn.py#L91-L115
d60ca27234a3276a4ca714b5ad616366a4bbdd9a
nas-tools
github_2023
linyuan0213
python
WebAction.__get_site
@staticmethod
def __get_site(data):
    """
    Query the information of a single site.
    """
    tid = data.get("id")
    site_free = False
    site_2xfree = False
    site_hr = False
    if tid:
        ret = Sites().get_sites(siteid=tid)
        if ret.get("signurl"):
            site_attr = SiteConf().get_grap_conf(ret.get("signurl"))
            if site_attr.get("FREE"):
                site_free = True
            if site_attr.get("2XFREE"):
                site_2xfree = True
            if site_attr.get("HR"):
                site_hr = True
    else:
        ret = []
    return {"code": 0, "site": ret, "site_free": site_free,
            "site_2xfree": site_2xfree, "site_hr": site_hr}
""" 查询单个站点信息 """
https://github.com/linyuan0213/nas-tools/blob/0badded472a89b9171abba049ea05bd6f3611364/web/action.py#L1153-L1174
0badded472a89b9171abba049ea05bd6f3611364