| repo_name (string, 2-55 chars) | dataset (1 distinct value) | owner (string, 3-31 chars) | lang (10 distinct values) | func_name (string, 1-104 chars) | code (string, 20-96.7k chars) | docstring (string, 1-4.92k chars) | url (string, 94-241 chars) | sha (string, 40 chars) |
|---|---|---|---|---|---|---|---|---|
sound_dataset_tools2
|
github_2023
|
kslz
|
python
|
del_dataset
|
def del_dataset(self, dataset_id, dataset_name):
"""
Considered a soft delete, but it doesn't seem necessary.
"""
msg_box = QMessageBox() # the undo button (not really)
msg_box.setWindowTitle("Notice")
msg_box.setText(f"Delete dataset {dataset_name}?\n{dataset_name} will be lost forever! (A really long time!)")
msg_box.setIcon(QMessageBox.Question)
# add buttons
yes_button = msg_box.addButton("OK", QMessageBox.AcceptRole)
no_button = msg_box.addButton("Cancel", QMessageBox.RejectRole)
# show the message box and wait for the user's response
msg_box.exec()
# get the user's response
button_clicked = msg_box.clickedButton()
if button_clicked == yes_button:
try:
# dataset = Dataset.delete().where(Dataset.dataset_id == dataset_id)
# self.add_dataset_data()
dataset = Dataset.get(Dataset.dataset_id == dataset_id)
name = dataset.dataset_name
dataset.delete_instance()
del_file_by_dataset_id(dataset_id)
except Exception as e:
guilogger.error(f"Failed to delete dataset id={dataset_id}")
guilogger.error(e)
else:
guilogger.info(f"Dataset {name} deleted successfully")
finally:
self.add_dataset_data()
else:
pass
|
"""
Considered a soft delete, but it doesn't seem necessary.
"""
|
https://github.com/kslz/sound_dataset_tools2/blob/5b4ce54c2c597b16e246f709322156ca69aa1a20/ui/mygui.py#L397-L434
|
5b4ce54c2c597b16e246f709322156ca69aa1a20
|
R2ET
|
github_2023
|
Kebii
|
python
|
quat2euler
|
def quat2euler(q, order='xyz', degrees=True):
"""
Convert (w, x, y, z) quaternions to xyz euler angles. This is used for bvh output.
"""
q0 = q[..., 0]
q1 = q[..., 1]
q2 = q[..., 2]
q3 = q[..., 3]
es = torch.empty(q0.shape + (3,), device=q.device, dtype=q.dtype)
if order == 'xyz':
es[..., 2] = torch.atan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[..., 1] = torch.asin((2 * (q1 * q3 + q0 * q2)).clip(-1, 1))
es[..., 0] = torch.atan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
else:
raise NotImplementedError('Cannot convert to ordering %s' % order)
if degrees:
es = es * 180 / np.pi
return es
|
"""
Convert (w, x, y, z) quaternions to xyz euler angles. This is used for bvh output.
"""
|
https://github.com/Kebii/R2ET/blob/41c7e40fcb8a40eb3fb0deccf3d6b88b8230d572/outside-code/transforms.py#L103-L123
|
41c7e40fcb8a40eb3fb0deccf3d6b88b8230d572
|
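A minimal usage sketch for the quat2euler function above, assuming torch and numpy are available and the function is importable; the identity quaternion should map to zero Euler angles.

import torch
q = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # identity rotation in (w, x, y, z) order
print(quat2euler(q, order='xyz', degrees=True))  # tensor([[0., 0., 0.]])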
TerminalGPT
|
github_2023
|
adamyodinsky
|
python
|
test_get_printer
|
def test_get_printer(self):
"""Tests the get_printer method."""
self.assertIsInstance(PrinterFactory.get_printer("plain"), PlainPrinter)
self.assertIsInstance(PrinterFactory.get_printer("markdown"), MarkdownPrinter)
|
"""Tests the get_printer method."""
|
https://github.com/adamyodinsky/TerminalGPT/blob/29aab7d9db5287b70c06abe161937bedc86e7933/tests/unit/test_printer.py#L145-L148
|
29aab7d9db5287b70c06abe161937bedc86e7933
|
coding-competitions-archive
|
github_2023
|
google
|
python
|
_utils_ToFloat
|
def _utils_ToFloat(s):
"""Returns float(s) if s is a float. Otherwise None.
Disallows infinities and nans.
Args:
s: A string to convert to a float.
Returns:
A float or None.
"""
try:
x = float(s)
if x not in [float('inf'), float('-inf')] and x == x: # not NaN
return x
else:
return None
except ValueError:
return None
|
"""Returns float(s) if s is a float. Otherwise None.
Disallows infinities and nans.
Args:
s: A string to convert to a float.
Returns:
A float or None.
"""
|
https://github.com/google/coding-competitions-archive/blob/87385db7dbd81b281225412b8ad496334536d016/codejam/2019/round_3/napkin_folding/output_validators/validator/napkin_folding.py#L141-L159
|
87385db7dbd81b281225412b8ad496334536d016
|
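A few illustrative checks for the helper above, assuming it is in scope; they exercise the infinity/NaN filtering described in the docstring.

assert _utils_ToFloat("3.5") == 3.5      # ordinary float parses
assert _utils_ToFloat("inf") is None     # infinities are rejected
assert _utils_ToFloat("nan") is None     # NaN fails the x == x check
assert _utils_ToFloat("abc") is None     # non-numeric input raises ValueError internally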
ChatGPT-for-Translation
|
github_2023
|
Raychanan
|
python
|
check_file_path
|
def check_file_path(file_path: Path):
"""
Ensure file extension is in ALLOWED_FILE_TYPES or is a URL.
If file ends with _translated.txt or _bilingual.txt, skip it.
If there is any txt file ending with _translated.txt or _bilingual.txt, skip it.
"""
if not file_path.suffix.lower() in ALLOWED_FILE_TYPES and not str(
file_path).startswith('http'):
print(f"File extension {file_path.suffix} is not allowed.")
raise Exception("Please use a txt file or URL")
if file_path.stem.endswith("_translated") or file_path.stem.endswith(
"extracted_translated"):
print(
f"You already have a translated file for {file_path}, skipping...")
return False
elif file_path.stem.endswith("_bilingual") or file_path.stem.endswith(
"extracted_bilingual"):
print(
f"You already have a bilingual file for {file_path}, skipping...")
return False
if (file_path.with_name(f"{file_path.stem}_translated.txt").exists() or
file_path.with_name(f"{file_path.stem}_extracted_translated.txt").exists()):
print(
f"You already have a translated file for {file_path}, skipping...")
return False
return True
|
"""
Ensure file extension is in ALLOWED_FILE_TYPES or is a URL.
If file ends with _translated.txt or _bilingual.txt, skip it.
If there is any txt file ending with _translated.txt or _bilingual.txt, skip it.
"""
|
https://github.com/Raychanan/ChatGPT-for-Translation/blob/0c6fe5d1fe66c1faed967e1b3403de5627bc85cc/ChatGPT-translate.py#L234-L262
|
0c6fe5d1fe66c1faed967e1b3403de5627bc85cc
|
CFINet
|
github_2023
|
shaunyuan22
|
python
|
get_proposal_pos_embed
|
def get_proposal_pos_embed(self,
proposals,
num_pos_feats=128,
temperature=10000):
"""Get the position embedding of proposal."""
scale = 2 * math.pi
dim_t = torch.arange(
num_pos_feats, dtype=torch.float32, device=proposals.device)
dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats)
# N, L, 4
proposals = proposals.sigmoid() * scale
# N, L, 4, 128
pos = proposals[:, :, :, None] / dim_t
# N, L, 4, 64, 2
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()),
dim=4).flatten(2)
return pos
|
"""Get the position embedding of proposal."""
|
https://github.com/shaunyuan22/CFINet/blob/45af342276e883aaacd49e280dba641331786603/mmdet/models/utils/transformer.py#L875-L891
|
45af342276e883aaacd49e280dba641331786603
|
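A quick shape check for the position embedding above; `head` is a hypothetical instance of the module that defines this method, and torch is assumed available.

import torch
proposals = torch.rand(2, 5, 4)               # (N, L, 4) raw proposal boxes
pos = head.get_proposal_pos_embed(proposals)  # sigmoid + sinusoidal encoding
print(pos.shape)                              # torch.Size([2, 5, 512]) == (N, L, 4 * num_pos_feats)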
OpenOcc
|
github_2023
|
wzzheng
|
python
|
get_reference_points
|
def get_reference_points(H, W, Z=8, num_points_in_pillar=4, dim='3d', bs=1, device='cpu', dtype=torch.float):
"""Get the reference points used in image cross-attention and single plane self-attention.
Args:
H, W: spatial shape of tpv.
Z: height of pillar.
D: sample D points uniformly from each pillar.
device (obj:`device`): The device where
reference_points should be.
Returns:
Tensor: reference points used in decoder, has \
shape (bs, num_keys, num_levels, 2).
"""
# reference points in 3D space, used in image cross-attention
if dim == '3d':
zs = torch.linspace(0.5, Z - 0.5, num_points_in_pillar, dtype=dtype,
device=device).view(-1, 1, 1).expand(num_points_in_pillar, H, W) / Z
xs = torch.linspace(0.5, W - 0.5, W, dtype=dtype,
device=device).view(1, 1, -1).expand(num_points_in_pillar, H, W) / W
ys = torch.linspace(0.5, H - 0.5, H, dtype=dtype,
device=device).view(1, -1, 1).expand(num_points_in_pillar, H, W) / H
ref_3d = torch.stack((xs, ys, zs), -1)
ref_3d = ref_3d.permute(0, 3, 1, 2).flatten(2).permute(0, 2, 1)
ref_3d = ref_3d[None].repeat(bs, 1, 1, 1)
return ref_3d
# reference points on 2D tpv plane, used in self attention in tpvformer04
# which is an older version. Now we use get_cross_view_ref_points instead.
elif dim == '2d':
ref_y, ref_x = torch.meshgrid(
torch.linspace(
0.5, H - 0.5, H, dtype=dtype, device=device),
torch.linspace(
0.5, W - 0.5, W, dtype=dtype, device=device))
ref_y = ref_y.reshape(-1)[None] / H
ref_x = ref_x.reshape(-1)[None] / W
ref_2d = torch.stack((ref_x, ref_y), -1)
ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2)
return ref_2d
|
"""Get the reference points used in image cross-attention and single plane self-attention.
Args:
H, W: spatial shape of tpv.
Z: height of pillar.
D: sample D points uniformly from each pillar.
device (obj:`device`): The device where
reference_points should be.
Returns:
Tensor: reference points used in decoder, has \
shape (bs, num_keys, num_levels, 2).
"""
|
https://github.com/wzzheng/OpenOcc/blob/dc80f79276e7048e0a9dc312531ad04c850963fb/model/encoder/tpvformer/utils.py#L76-L114
|
dc80f79276e7048e0a9dc312531ad04c850963fb
|
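A small shape sketch for the function above, assuming torch is available and the function is callable as shown; the values below are illustrative only.

ref_3d = get_reference_points(H=2, W=3, Z=8, num_points_in_pillar=4, dim='3d')
print(ref_3d.shape)  # torch.Size([1, 4, 6, 3]) -> (bs, num_points_in_pillar, H*W, 3)
ref_2d = get_reference_points(H=2, W=3, dim='2d')
print(ref_2d.shape)  # torch.Size([1, 6, 1, 2]) -> (bs, H*W, 1, 2)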
vscode-ocp-cad-viewer
|
github_2023
|
bernhard-42
|
python
|
get_default
|
def get_default(key):
"""Get default value for key"""
return DEFAULTS.get(key)
|
"""Get default value for key"""
|
https://github.com/bernhard-42/vscode-ocp-cad-viewer/blob/d32b3b482ee90b6762fa2c9819b6c9edbcf2d66b/ocp_vscode/config.py#L240-L242
|
d32b3b482ee90b6762fa2c9819b6c9edbcf2d66b
|
jaxonnxruntime
|
github_2023
|
google
|
python
|
BatchNormalization.version_9
|
@classmethod
def version_9(
cls, node: onnx_node.OnnxNode, inputs: Sequence[Any]
) -> Callable[..., Any]:
"""ONNX version_9 BatchNormalization op."""
cls._prepare(node, inputs, onnx_batchnormalization)
return onnx_batchnormalization
|
"""ONNX version_9 BatchNormalization op."""
|
https://github.com/google/jaxonnxruntime/blob/e20b8defdfd4263c89a5682e6d993499ad5bcb74/jaxonnxruntime/onnx_ops/batchnormalization.py#L56-L62
|
e20b8defdfd4263c89a5682e6d993499ad5bcb74
|
jaxonnxruntime
|
github_2023
|
google
|
python
|
Mul.version_13
|
@classmethod
def version_13(
cls, node: onnx_node.OnnxNode, inputs: Sequence[Any]
) -> Callable[..., Any]:
"""ONNX version_13 Mul op."""
cls._prepare(node, inputs, onnx_mul)
return onnx_mul
|
"""ONNX version_13 Mul op."""
|
https://github.com/google/jaxonnxruntime/blob/e20b8defdfd4263c89a5682e6d993499ad5bcb74/jaxonnxruntime/onnx_ops/mul.py#L53-L59
|
e20b8defdfd4263c89a5682e6d993499ad5bcb74
|
llama.cpp
|
github_2023
|
ggerganov
|
python
|
generate_markdown_documentation
|
def generate_markdown_documentation(
pydantic_models: list[type[BaseModel]], model_prefix="Model", fields_prefix="Fields",
documentation_with_field_description=True
) -> str:
"""
Generate markdown documentation for a list of Pydantic models.
Args:
pydantic_models (list[type[BaseModel]]): list of Pydantic model classes.
model_prefix (str): Prefix for the model section.
fields_prefix (str): Prefix for the fields section.
documentation_with_field_description (bool): Include field descriptions in the documentation.
Returns:
str: Generated text documentation.
"""
documentation = ""
pyd_models: list[tuple[type[BaseModel], bool]] = [(model, True) for model in pydantic_models]
for model, add_prefix in pyd_models:
if add_prefix:
documentation += f"{model_prefix}: {model.__name__}\n"
else:
documentation += f"Model: {model.__name__}\n"
# Handling multi-line model description with proper indentation
class_doc = getdoc(model)
base_class_doc = getdoc(BaseModel)
class_description = class_doc if class_doc and class_doc != base_class_doc else ""
if class_description != "":
documentation += " Description: "
documentation += format_multiline_description(class_description, 0) + "\n"
if add_prefix:
# Indenting the fields section
documentation += f" {fields_prefix}:\n"
else:
documentation += f" Fields:\n" # noqa: F541
if isclass(model) and issubclass(model, BaseModel):
for name, field_type in get_type_hints(model).items():
# if name == "markdown_code_block":
# continue
if get_origin(field_type) == list:
element_type = get_args(field_type)[0]
if isclass(element_type) and issubclass(element_type, BaseModel):
pyd_models.append((element_type, False))
if get_origin(field_type) == Union:
element_types = get_args(field_type)
for element_type in element_types:
if isclass(element_type) and issubclass(element_type, BaseModel):
pyd_models.append((element_type, False))
documentation += generate_field_markdown(
name, field_type, model, documentation_with_field_description=documentation_with_field_description
)
documentation += "\n"
if hasattr(model, "Config") and hasattr(model.Config,
"json_schema_extra") and "example" in model.Config.json_schema_extra:
documentation += f" Expected Example Output for {format_model_and_field_name(model.__name__)}:\n"
json_example = json.dumps(model.Config.json_schema_extra["example"])
documentation += format_multiline_description(json_example, 2) + "\n"
return documentation
|
"""
Generate markdown documentation for a list of Pydantic models.
Args:
pydantic_models (list[type[BaseModel]]): list of Pydantic model classes.
model_prefix (str): Prefix for the model section.
fields_prefix (str): Prefix for the fields section.
documentation_with_field_description (bool): Include field descriptions in the documentation.
Returns:
str: Generated text documentation.
"""
|
https://github.com/ggerganov/llama.cpp/blob/4078c77f9891831f29ffc7c315c8ec6695ba5ce7/examples/pydantic_models_to_grammar.py#L676-L738
|
4078c77f9891831f29ffc7c315c8ec6695ba5ce7
|
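An illustrative call, assuming pydantic is installed and the function (together with its module-level helpers) is importable; `Item` is a made-up model used only for this example.

from pydantic import BaseModel

class Item(BaseModel):
    """A single item with a name and a price."""
    name: str
    price: float

print(generate_markdown_documentation([Item], model_prefix="Output Model"))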
home-assistant-streamdeck-yaml
|
github_2023
|
basnijholt
|
python
|
_ButtonDialBase.templatable
|
@classmethod
def templatable(cls: type[Button]) -> set[str]:
"""Return if an attribute is templatable, which is if the type-annotation is str."""
schema = cls.schema()
properties = schema["properties"]
return {k for k, v in properties.items() if v["allow_template"]}
|
"""Return if an attribute is templatable, which is if the type-annotation is str."""
|
https://github.com/basnijholt/home-assistant-streamdeck-yaml/blob/e04dc7229b8a6148a511ed455e2df5988bbcf6c4/home_assistant_streamdeck_yaml.py#L187-L192
|
e04dc7229b8a6148a511ed455e2df5988bbcf6c4
|
camel
|
github_2023
|
camel-ai
|
python
|
GroqModel.stream
|
@property
def stream(self) -> bool:
r"""Returns whether the model supports streaming. But Groq API does
not support streaming.
"""
return False
|
r"""Returns whether the model supports streaming. But Groq API does
not support streaming.
"""
|
https://github.com/camel-ai/camel/blob/4536d76610140ac02f92cb38a3dfc56d95f231ac/camel/models/groq_model.py#L134-L139
|
4536d76610140ac02f92cb38a3dfc56d95f231ac
|
MaterialSearch
|
github_2023
|
chn-lee-yumi
|
python
|
clean_cache
|
def clean_cache():
"""
Clear the search caches.
"""
search_image_by_text_path_time.cache_clear()
search_image_by_image.cache_clear()
search_video_by_image.cache_clear()
search_video_by_text_path_time.cache_clear()
search_pexels_video_by_text.cache_clear()
|
"""
Clear the search caches.
"""
|
https://github.com/chn-lee-yumi/MaterialSearch/blob/c7a5e94d67c8dd67fc6c7d3f1eb6cf8c89d5467c/search.py#L21-L29
|
c7a5e94d67c8dd67fc6c7d3f1eb6cf8c89d5467c
|
entaoai
|
github_2023
|
akshata29
|
python
|
__validate_time_delta
|
def __validate_time_delta(value: str) -> str:
"""
Check to see if passed string is in the list of possible Time Deltas.
:param value: Time Delta name.
:return: Passed value or No Return
"""
valid_values = TIME_DELTA_VALUES
if value in valid_values:
return value
else:
logging.error(
f"Invalid time_delta value: {value}. Valid options: {valid_values}"
)
|
"""
Check to see if passed string is in the list of possible Time Deltas.
:param value: Time Delta name.
:return: Passed value or No Return
"""
|
https://github.com/akshata29/entaoai/blob/aa6cfbfbf6f19128bcc9135bc75effe38857bd31/api/Python/Utilities/fmp.py#L456-L468
|
aa6cfbfbf6f19128bcc9135bc75effe38857bd31
|
ai.deploy.box
|
github_2023
|
TalkUHulk
|
python
|
intree_extensions
|
def intree_extensions(
paths: Iterable[str], package_dir: Optional[Dict[str, str]] = None
) -> List[Pybind11Extension]:
"""
Generate Pybind11Extensions from source files directly located in a Python
source tree.
``package_dir`` behaves as in ``setuptools.setup``. If unset, the Python
package root parent is determined as the first parent directory that does
not contain an ``__init__.py`` file.
"""
exts = []
if package_dir is None:
for path in paths:
parent, _ = os.path.split(path)
while os.path.exists(os.path.join(parent, "__init__.py")):
parent, _ = os.path.split(parent)
relname, _ = os.path.splitext(os.path.relpath(path, parent))
qualified_name = relname.replace(os.path.sep, ".")
exts.append(Pybind11Extension(qualified_name, [path]))
return exts
for path in paths:
for prefix, parent in package_dir.items():
if path.startswith(parent):
relname, _ = os.path.splitext(os.path.relpath(path, parent))
qualified_name = relname.replace(os.path.sep, ".")
if prefix:
qualified_name = prefix + "." + qualified_name
exts.append(Pybind11Extension(qualified_name, [path]))
break
else:
msg = (
f"path {path} is not a child of any of the directories listed "
f"in 'package_dir' ({package_dir})"
)
raise ValueError(msg)
return exts
|
"""
Generate Pybind11Extensions from source files directly located in a Python
source tree.
``package_dir`` behaves as in ``setuptools.setup``. If unset, the Python
package root parent is determined as the first parent directory that does
not contain an ``__init__.py`` file.
"""
|
https://github.com/TalkUHulk/ai.deploy.box/blob/f937195eab6de38078d1524dae598fd5f142c8c8/python/pybind11/pybind11/setup_helpers.py#L293-L332
|
f937195eab6de38078d1524dae598fd5f142c8c8
|
vscode-mypy
|
github_2023
|
microsoft
|
python
|
update_sys_path
|
def update_sys_path(path_to_add: str) -> None:
"""Add given path to `sys.path`."""
if path_to_add not in sys.path and os.path.isdir(path_to_add):
sys.path.append(path_to_add)
|
"""Add given path to `sys.path`."""
|
https://github.com/microsoft/vscode-mypy/blob/a5cf3e1e33b09dd401190801b5ad32702344540d/bundled/tool/_debug_server.py#L11-L14
|
a5cf3e1e33b09dd401190801b5ad32702344540d
|
LocalAI
|
github_2023
|
mudler
|
python
|
LoadModel
|
def LoadModel(self, request, context):
"""
A gRPC method that loads a model into memory.
Args:
request: A LoadModelRequest object that contains the request parameters.
context: A grpc.ServicerContext object that provides information about the RPC.
Returns:
A Result object that contains the result of the LoadModel operation.
"""
model_name = request.Model
try:
kwargs = {}
if request.Type != "":
kwargs['model_type'] = request.Type
if request.PipelineType != "": # Reuse the PipelineType field for language
kwargs['lang'] = request.PipelineType
self.model_name = model_name
self.model = Reranker(model_name, **kwargs)
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
# Implement your logic here for the LoadModel service
# Replace this with your desired response
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
"""
A gRPC method that loads a model into memory.
Args:
request: A LoadModelRequest object that contains the request parameters.
context: A grpc.ServicerContext object that provides information about the RPC.
Returns:
A Result object that contains the result of the LoadModel operation.
"""
|
https://github.com/mudler/LocalAI/blob/e01acc88c984c60b5a3e60bb1e12d4e232a20f6c/backend/python/rerankers/backend.py#L45-L70
|
e01acc88c984c60b5a3e60bb1e12d4e232a20f6c
|
BlenderGPT
|
github_2023
|
gd3kr
|
python
|
_frozen_setattrs
|
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
if isinstance(self, BaseException) and name in (
"__cause__",
"__context__",
):
BaseException.__setattr__(self, name, value)
return
raise FrozenInstanceError()
|
"""
Attached to frozen classes as __setattr__.
"""
|
https://github.com/gd3kr/BlenderGPT/blob/3fbc3bd3f169d904f8bf8a067807c4a71d3d3b4b/lib/attr/_make.py#L587-L598
|
3fbc3bd3f169d904f8bf8a067807c4a71d3d3b4b
|
BlenderGPT
|
github_2023
|
gd3kr
|
python
|
ChatCompletion.create
|
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new chat completion for the provided messages and parameters.
See https://platform.openai.com/docs/api-reference/chat-completions/create
for a list of valid parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
while True:
try:
return super().create(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
|
"""
Creates a new chat completion for the provided messages and parameters.
See https://platform.openai.com/docs/api-reference/chat-completions/create
for a list of valid parameters.
"""
|
https://github.com/gd3kr/BlenderGPT/blob/3fbc3bd3f169d904f8bf8a067807c4a71d3d3b4b/lib/openai/api_resources/chat_completion.py#L12-L30
|
3fbc3bd3f169d904f8bf8a067807c4a71d3d3b4b
|
xTuring
|
github_2023
|
stochasticai
|
python
|
__getattr__
|
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
|
"""Forward missing attributes to the wrapped module."""
|
https://github.com/stochasticai/xTuring/blob/570a0d6f971e47d9dde3d8b183c186e2010ba384/src/xturing/engines/lora_engine/lora.py#L399-L404
|
570a0d6f971e47d9dde3d8b183c186e2010ba384
|
UniDetector
|
github_2023
|
zhenyuw16
|
python
|
__init__
|
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if self.with_sac:
self.conv2 = build_conv_layer(
self.sac,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
elif not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
|
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
|
https://github.com/zhenyuw16/UniDetector/blob/eb182535178ecfad18142bed2e03b458a0a8f451/mmdet/models/backbones/detectors_resnext.py#L14-L95
|
eb182535178ecfad18142bed2e03b458a0a8f451
|
UniDetector
|
github_2023
|
zhenyuw16
|
python
|
init_weights
|
def init_weights(self):
"""Initialize weights of the head."""
bias_init = bias_init_with_prob(0.1)
self.heatmap_head[-1].bias.data.fill_(bias_init)
for head in [self.wh_head, self.offset_head]:
for m in head.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001)
|
"""Initialize weights of the head."""
|
https://github.com/zhenyuw16/UniDetector/blob/eb182535178ecfad18142bed2e03b458a0a8f451/mmdet/models/dense_heads/centernet_head.py#L72-L79
|
eb182535178ecfad18142bed2e03b458a0a8f451
|
robocorp
|
github_2023
|
robocorp
|
python
|
insert_missing_modules
|
def insert_missing_modules(modules: Dict[str, ModuleType], module_name: str) -> None:
"""
Used by ``import_path`` to create intermediate modules.
When we want to import a module as "src.tests.test_foo" for example, we need
to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo",
otherwise "src.tests.test_foo" is not importable by ``__import__``.
Based on: https://github.com/pytest-dev/pytest/blob/main/src/_pytest/pathlib.py
"""
import importlib
module_parts = module_name.split(".")
while module_name:
if module_name not in modules:
try:
# If sys.meta_path is empty, calling import_module will issue
# a warning and raise ModuleNotFoundError. To avoid the
# warning, we check sys.meta_path explicitly and raise the error
# ourselves to fall back to creating a dummy module.
if not sys.meta_path:
raise ModuleNotFoundError
importlib.import_module(module_name)
except ModuleNotFoundError:
module = ModuleType(
module_name,
doc="Empty module created by robocorp-tasks.",
)
modules[module_name] = module
module_parts.pop(-1)
module_name = ".".join(module_parts)
|
"""
Used by ``import_path`` to create intermediate modules.
When we want to import a module as "src.tests.test_foo" for example, we need
to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo",
otherwise "src.tests.test_foo" is not importable by ``__import__``.
Based on: https://github.com/pytest-dev/pytest/blob/main/src/_pytest/pathlib.py
"""
|
https://github.com/robocorp/robocorp/blob/3df7714109713269f9e6122254bd0d97a55e9f6a/tasks/src/robocorp/tasks/_collect_tasks.py#L37-L66
|
3df7714109713269f9e6122254bd0d97a55e9f6a
|
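A minimal usage sketch, assuming the function is imported and applied to the real sys.modules mapping; "src.tests.test_foo" is a hypothetical module name.

import sys

insert_missing_modules(sys.modules, "src.tests.test_foo")
# "src", "src.tests" and "src.tests.test_foo" now resolve to (possibly empty) modules.
assert "src.tests" in sys.modules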
robocorp
|
github_2023
|
robocorp
|
python
|
__init__
|
def __init__(self, pattern=None):
"""Refer https://docs.microsoft.com/en-us/windows/win32/api/uiautomationclient/nn-uiautomationclient-iuiautomationinvokepattern"""
self.pattern = pattern
|
"""Refer https://docs.microsoft.com/en-us/windows/win32/api/uiautomationclient/nn-uiautomationclient-iuiautomationinvokepattern"""
|
https://github.com/robocorp/robocorp/blob/3df7714109713269f9e6122254bd0d97a55e9f6a/windows/src/robocorp/windows/_vendored/uiautomation/uiautomation.py#L4192-L4194
|
3df7714109713269f9e6122254bd0d97a55e9f6a
|
ViPT
|
github_2023
|
jiawen-zhu
|
python
|
forward
|
def forward(self, x):
'''
x: [batch_size, features, k]
'''
b, c, h, w = x.shape
x = x.contiguous().view(b, c, h*w)
if self.smooth:
mask = self.softmax(x * self.smooth)
else:
mask = self.softmax(x)
output = mask * x
output = output.contiguous().view(b, c, h, w)
return output
|
'''
x: [batch_size, features, k]
'''
|
https://github.com/jiawen-zhu/ViPT/blob/b316fb0cf29a0552f169360556bdc691e43f8452/lib/models/vipt/vit_ce_prompt.py#L33-L47
|
b316fb0cf29a0552f169360556bdc691e43f8452
|
torch-merf
|
github_2023
|
ashawkey
|
python
|
_march_rays_train.forward
|
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx, rays_o, rays_d, bound, contract, density_bitfield, C, H, nears, fars, perturb=False, dt_gamma=0, max_steps=1024):
''' march rays to generate points (forward only)
Args:
rays_o/d: float, [N, 3]
bound: float, scalar
density_bitfield: uint8: [CHHH // 8]
C: int
H: int
nears/fars: float, [N]
step_counter: int32, (2), used to count the actual number of generated points.
mean_count: int32, estimated mean steps to accelerate training. (but will randomly drop rays if the actual point count exceeded this threshold.)
perturb: bool
align: int, pad output so its size is divisible by align, set to -1 to disable.
force_all_rays: bool, ignore step_counter and mean_count, always calculate all rays. Useful if rendering the whole image, instead of some rays.
dt_gamma: float, called cone_angle in instant-ngp, exponentially accelerates ray marching if > 0. (very significant effect, but generally leads to worse performance)
max_steps: int, max number of sampled points along each ray, also affect min_stepsize.
Returns:
xyzs: float, [M, 3], all generated points' coords. (all rays concatenated, need to use `rays` to extract points belonging to each ray)
dirs: float, [M, 3], all generated points' view dirs.
ts: float, [M, 2], all generated points' ts.
rays: int32, [N, 2], all rays' (point_offset, point_count), e.g., xyzs[rays[i, 0]:(rays[i, 0] + rays[i, 1])] --> points belonging to rays[i, 0]
'''
if not rays_o.is_cuda: rays_o = rays_o.cuda()
if not rays_d.is_cuda: rays_d = rays_d.cuda()
if not density_bitfield.is_cuda: density_bitfield = density_bitfield.cuda()
rays_o = rays_o.float().contiguous().view(-1, 3)
rays_d = rays_d.float().contiguous().view(-1, 3)
density_bitfield = density_bitfield.contiguous()
N = rays_o.shape[0] # num rays
step_counter = torch.zeros(1, dtype=torch.int32, device=rays_o.device) # point counter, ray counter
if perturb:
noises = torch.rand(N, dtype=rays_o.dtype, device=rays_o.device)
else:
noises = torch.zeros(N, dtype=rays_o.dtype, device=rays_o.device)
# first pass: write rays, get total number of points M to render
rays = torch.empty(N, 2, dtype=torch.int32, device=rays_o.device) # id, offset, num_steps
get_backend().march_rays_train(rays_o, rays_d, density_bitfield, bound, contract, dt_gamma, max_steps, N, C, H, nears, fars, None, None, None, rays, step_counter, noises)
# allocate based on M
M = step_counter.item()
xyzs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device)
dirs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device)
ts = torch.zeros(M, 2, dtype=rays_o.dtype, device=rays_o.device)
# second pass: write outputs
get_backend().march_rays_train(rays_o, rays_d, density_bitfield, bound, contract, dt_gamma, max_steps, N, C, H, nears, fars, xyzs, dirs, ts, rays, step_counter, noises)
return xyzs, dirs, ts, rays
|
''' march rays to generate points (forward only)
Args:
rays_o/d: float, [N, 3]
bound: float, scalar
density_bitfield: uint8: [CHHH // 8]
C: int
H: int
nears/fars: float, [N]
step_counter: int32, (2), used to count the actual number of generated points.
mean_count: int32, estimated mean steps to accelerate training. (but will randomly drop rays if the actual point count exceeded this threshold.)
perturb: bool
align: int, pad output so its size is divisible by align, set to -1 to disable.
force_all_rays: bool, ignore step_counter and mean_count, always calculate all rays. Useful if rendering the whole image, instead of some rays.
dt_gamma: float, called cone_angle in instant-ngp, exponentially accelerates ray marching if > 0. (very significant effect, but generally leads to worse performance)
max_steps: int, max number of sampled points along each ray, also affect min_stepsize.
Returns:
xyzs: float, [M, 3], all generated points' coords. (all rays concatenated, need to use `rays` to extract points belonging to each ray)
dirs: float, [M, 3], all generated points' view dirs.
ts: float, [M, 2], all generated points' ts.
rays: int32, [N, 2], all rays' (point_offset, point_count), e.g., xyzs[rays[i, 0]:(rays[i, 0] + rays[i, 1])] --> points belonging to rays[i, 0]
'''
|
https://github.com/ashawkey/torch-merf/blob/a669be605349c3af5167832f8ead6f69bbf8e697/raymarching/raymarching.py#L195-L251
|
a669be605349c3af5167832f8ead6f69bbf8e697
|
hnsqlite
|
github_2023
|
jiggy-ai
|
python
|
Collection._save_index_to_disk
|
@classmethod
def _save_index_to_disk(cls, name: str, hnsw_ix : dbHnswIndexConfig) -> Tuple[str, str, int]:
"""
Save the current index to disk and return the filename, md5sum, and count of items in the index
"""
count = len(hnsw_ix.get_ids_list())
filename = f"index_{name}.hnsw"
try:
os.unlink(filename)
except:
pass
hnsw_ix.save_index(filename)
md5sum = md5_file(filename)
logger.info(f"saved index to {filename} with md5sum {md5sum} and {count} items")
return filename, md5sum, count
|
"""
Save the current index to disk and return the filename, md5sum, and count of items in the index
"""
|
https://github.com/jiggy-ai/hnsqlite/blob/9824e6c73508d844ea3424ba9f3033da46b9de9f/hnsqlite/collection.py#L255-L269
|
9824e6c73508d844ea3424ba9f3033da46b9de9f
|
3D_Corruptions_AD
|
github_2023
|
thu-ml
|
python
|
apply_jigsaw
|
def apply_jigsaw(arr, destinations):
"""Move cells of an image similar to a jigsaw puzzle.
This function will split the image into ``rows x cols`` cells and
move each cell to the target index given in `destinations`.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested
* ``float128``: yes; fully tested
* ``bool``: yes; fully tested
Parameters
----------
arr : ndarray
Array with at least two dimensions denoting height and width.
destinations : ndarray
2-dimensional array containing for each cell the id of the destination
cell. The order is expected to be a flattened c-order, i.e. row by row.
The height of the image must be evenly divisible by the number of
rows in this array. Analogous for the width and columns.
Returns
-------
ndarray
Modified image with cells moved according to `destinations`.
"""
# pylint complains about unravel_index() here
# pylint: disable=unbalanced-tuple-unpacking
nb_rows, nb_cols = destinations.shape[0:2]
assert arr.ndim >= 2, (
"Expected array with at least two dimensions, but got %d with "
"shape %s." % (arr.ndim, arr.shape))
assert (arr.shape[0] % nb_rows) == 0, (
"Expected image height to by divisible by number of rows, but got "
"height %d and %d rows. Use cropping or padding to modify the image "
"height or change the number of rows." % (arr.shape[0], nb_rows)
)
assert (arr.shape[1] % nb_cols) == 0, (
"Expected image width to by divisible by number of columns, but got "
"width %d and %d columns. Use cropping or padding to modify the image "
"width or change the number of columns." % (arr.shape[1], nb_cols)
)
cell_height = arr.shape[0] // nb_rows
cell_width = arr.shape[1] // nb_cols
dest_rows, dest_cols = np.unravel_index(
destinations.flatten(), (nb_rows, nb_cols))
result = np.zeros_like(arr)
i = 0
for source_row in np.arange(nb_rows):
for source_col in np.arange(nb_cols):
# TODO vectorize coords computation
dest_row, dest_col = dest_rows[i], dest_cols[i]
source_y1 = source_row * cell_height
source_y2 = source_y1 + cell_height
source_x1 = source_col * cell_width
source_x2 = source_x1 + cell_width
dest_y1 = dest_row * cell_height
dest_y2 = dest_y1 + cell_height
dest_x1 = dest_col * cell_width
dest_x2 = dest_x1 + cell_width
source = arr[source_y1:source_y2, source_x1:source_x2]
result[dest_y1:dest_y2, dest_x1:dest_x2] = source
i += 1
return result
|
"""Move cells of an image similar to a jigsaw puzzle.
This function will split the image into ``rows x cols`` cells and
move each cell to the target index given in `destinations`.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested
* ``float128``: yes; fully tested
* ``bool``: yes; fully tested
Parameters
----------
arr : ndarray
Array with at least two dimensions denoting height and width.
destinations : ndarray
2-dimensional array containing for each cell the id of the destination
cell. The order is expected to be a flattened c-order, i.e. row by row.
The height of the image must be evenly divisible by the number of
rows in this array. Analogous for the width and columns.
Returns
-------
ndarray
Modified image with cells moved according to `destinations`.
"""
|
https://github.com/thu-ml/3D_Corruptions_AD/blob/48c23f77fe82beab599f8248b7794928334a3fb5/OpenPCDet/OpenPCDet/pcdet/datasets/kitti/utils/imgaug/augmenters/geometric.py#L386-L474
|
48c23f77fe82beab599f8248b7794928334a3fb5
|
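A tiny worked example for apply_jigsaw above, assuming numpy is available; a 4x4 array is split into 2x2 cells and the two cells in the top row swap places.

import numpy as np

arr = np.arange(16).reshape(4, 4)
destinations = np.array([[1, 0],     # cell (0,0) -> cell 1, cell (0,1) -> cell 0
                         [2, 3]])    # bottom row stays in place
out = apply_jigsaw(arr, destinations)
print(out)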
3D_Corruptions_AD
|
github_2023
|
thu-ml
|
python
|
remove_out_of_image_
|
def remove_out_of_image_(self, fully=True, partly=False):
"""
Remove all LS that are fully/partially outside of an image in-place.
Added in 0.4.0.
Parameters
----------
fully : bool, optional
Whether to remove line strings that are fully outside of the image.
partly : bool, optional
Whether to remove line strings that are partially outside of the
image.
Returns
-------
imgaug.augmentables.lines.LineStringsOnImage
Reduced set of line strings. Those that are fully/partially
outside of the given image plane are removed.
The object and its items may have been modified in-place.
"""
self.line_strings = [
ls for ls in self.line_strings
if not ls.is_out_of_image(self.shape, fully=fully, partly=partly)]
return self
|
"""
Remove all LS that are fully/partially outside of an image in-place.
Added in 0.4.0.
Parameters
----------
fully : bool, optional
Whether to remove line strings that are fully outside of the image.
partly : bool, optional
Whether to remove line strings that are partially outside of the
image.
Returns
-------
imgaug.augmentables.lines.LineStringsOnImage
Reduced set of line strings. Those that are fully/partially
outside of the given image plane are removed.
The object and its items may have been modified in-place.
"""
|
https://github.com/thu-ml/3D_Corruptions_AD/blob/48c23f77fe82beab599f8248b7794928334a3fb5/utils/imgaug/augmentables/lines.py#L1928-L1954
|
48c23f77fe82beab599f8248b7794928334a3fb5
|
CEASC
|
github_2023
|
Cuogeihong
|
python
|
show_result
|
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (dict): The results.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
img = mmcv.imread(img)
img = img.copy()
pan_results = result['pan_results']
# keep objects ahead
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
segms=segms,
labels=labels,
class_names=self.CLASSES,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
|
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (dict): The results.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
|
https://github.com/Cuogeihong/CEASC/blob/2abfd1a99f1b0fe1ed3d51588b64549e1584da50/mmdet/models/detectors/panoptic_two_stage_segmentor.py#L208-L279
|
2abfd1a99f1b0fe1ed3d51588b64549e1584da50
|
CEASC
|
github_2023
|
Cuogeihong
|
python
|
forward_train
|
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
|
"""HeuristicFusionHead has no training loss."""
|
https://github.com/Cuogeihong/CEASC/blob/2abfd1a99f1b0fe1ed3d51588b64549e1584da50/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py#L23-L25
|
2abfd1a99f1b0fe1ed3d51588b64549e1584da50
|
GPTCache
|
github_2023
|
zilliztech
|
python
|
EvictionBase
|
def EvictionBase(name: str, **kwargs):
"""Generate specific CacheStorage with the configuration.
:param name: the name of the eviction, like: memory
:type name: str
:param policy: eviction strategy
:type policy: str
:param maxsize: the maxsize of cache data
:type maxsize: int
:param clean_size: will clean the size of data when the size of cache data reaches the max size
:type clean_size: int
:param on_evict: the function for cleaning the data in the store
:type on_evict: Callable[[List[Any]], None]
Example:
.. code-block:: python
from gptcache.manager import EvictionBase
cache_base = EvictionBase('memory', policy='lru', maxsize=10, clean_size=2, on_evict=lambda x: print(x))
"""
return eviction_manager.EvictionBase.get(name, **kwargs)
|
"""Generate specific CacheStorage with the configuration.
:param name: the name of the eviction, like: memory
:type name: str
:param policy: eviction strategy
:type policy: str
:param maxsize: the maxsize of cache data
:type maxsize: int
:param clean_size: will clean the size of data when the size of cache data reaches the max size
:type clean_size: int
:param on_evict: the function for cleaning the data in the store
:type on_evict: Callable[[List[Any]], None]
Example:
.. code-block:: python
from gptcache.manager import EvictionBase
cache_base = EvictionBase('memory', policy='lru', maxsize=10, clean_size=2, on_evict=lambda x: print(x))
"""
|
https://github.com/zilliztech/GPTCache/blob/48f8e768d7dcd7f66d948ad07914a630a382b45b/gptcache/manager/eviction/__init__.py#L10-L32
|
48f8e768d7dcd7f66d948ad07914a630a382b45b
|
gptme
|
github_2023
|
ErikBjare
|
python
|
write
|
def write(self, branches=True) -> None:
"""
Writes to the conversation log.
"""
# create directory if it doesn't exist
Path(self.logfile).parent.mkdir(parents=True, exist_ok=True)
# write current branch
self.log.write_jsonl(self.logfile)
# write other branches
# FIXME: won't write main branch if on a different branch
if branches:
branches_dir = self.logdir / "branches"
branches_dir.mkdir(parents=True, exist_ok=True)
for branch, log in self._branches.items():
if branch == "main":
continue
branch_path = branches_dir / f"{branch}.jsonl"
log.write_jsonl(branch_path)
|
"""
Writes to the conversation log.
"""
|
https://github.com/ErikBjare/gptme/blob/ebc076bb75a3af2eafbe498634abb032772f11df/gptme/logmanager.py#L167-L186
|
ebc076bb75a3af2eafbe498634abb032772f11df
|
yolov8-face
|
github_2023
|
derronqi
|
python
|
set_api_key
|
def set_api_key(self, key: str):
"""
Set the API key for authentication.
Args:
key (str): The API key string.
"""
self.api_key = key
|
"""
Set the API key for authentication.
Args:
key (str): The API key string.
"""
|
https://github.com/derronqi/yolov8-face/blob/18f9fde9862ecee74a28e56a8f09bbfc3bcff6d4/ultralytics/hub/auth.py#L132-L139
|
18f9fde9862ecee74a28e56a8f09bbfc3bcff6d4
|
yolov8-face
|
github_2023
|
derronqi
|
python
|
__init__
|
def __init__(self, c1, c2, num_heads, num_layers):
"""Initialize a Transformer module with position embedding and specified number of heads and layers."""
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2
|
"""Initialize a Transformer module with position embedding and specified number of heads and layers."""
|
https://github.com/derronqi/yolov8-face/blob/18f9fde9862ecee74a28e56a8f09bbfc3bcff6d4/ultralytics/nn/modules/transformer.py#L123-L131
|
18f9fde9862ecee74a28e56a8f09bbfc3bcff6d4
|
q-diffusion
|
github_2023
|
Xiuyu-Li
|
python
|
makedir_exist_ok
|
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
|
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
|
https://github.com/Xiuyu-Li/q-diffusion/blob/715783da70baa267321d6700ceb8941400c309d1/ddim/datasets/utils.py#L36-L46
|
715783da70baa267321d6700ceb8941400c309d1
|
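A tiny usage sketch with a hypothetical path; the second call illustrates that an existing directory is not an error.

makedir_exist_ok("/tmp/q_diffusion_cache")  # creates the directory (hypothetical path)
makedir_exist_ok("/tmp/q_diffusion_cache")  # already exists: EEXIST is swallowed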
prompt-pretraining
|
github_2023
|
amazon-science
|
python
|
__init__
|
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
max_dets_per_image=None,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have the following corresponding metadata:
"json_file": the path to the LVIS format annotation
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): optional, an output directory to dump results.
max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
This limit, by default of the LVIS dataset, is 300.
"""
from lvis import LVIS
self._logger = logging.getLogger(__name__)
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warn(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None  # Inferring it from predictions should be better
else:
self._tasks = tasks
self._distributed = distributed
self._output_dir = output_dir
self._max_dets_per_image = max_dets_per_image
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
self._lvis_api = LVIS(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the LVIS evaluation server).
self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
file_path = "/home/ubuntu/efs/Detic/output/Detic/Detic_LI_CLIP_R5021k_640b64_4x_ft4x_max-size/inference_lvis_v1_val/instances_predictions.pth"
with PathManager.open(file_path, "rb") as f:
self._predictions = torch.load(f)
|
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have the following corresponding metadata:
"json_file": the path to the LVIS format annotation
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): optional, an output directory to dump results.
max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
This limit, by default of the LVIS dataset, is 300.
"""
|
https://github.com/amazon-science/prompt-pretraining/blob/24bca56b21b4fab1d493c8758c31fd6d1c40bb96/third_party/Detic/cherry_pick.py#L627-L677
|
24bca56b21b4fab1d493c8758c31fd6d1c40bb96
|
AttentionShift
|
github_2023
|
MingXiangL
|
python
|
gen_single_level_base_anchors
|
def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
"""Generate base anchors of a single level.
Args:
base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
anchors.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature maps.
"""
x_center, y_center = center
base_anchors = []
for base_size in base_sizes_per_level:
w, h = base_size
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchor = torch.Tensor([
x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
y_center + 0.5 * h
])
base_anchors.append(base_anchor)
base_anchors = torch.stack(base_anchors, dim=0)
return base_anchors
|
"""Generate base anchors of a single level.
Args:
base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
anchors.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature maps.
"""
|
https://github.com/MingXiangL/AttentionShift/blob/dc3b87d35d2334d8675cb899ead2c02d74c163c1/mmdet/core/anchor/anchor_generator.py#L639-L665
|
dc3b87d35d2334d8675cb899ead2c02d74c163c1
|
uuid-utils
|
github_2023
|
aminalaee
|
python
|
uuid8
|
def uuid8(bytes):
"""Generate a custom UUID comprised almost entirely of user-supplied bytes.."""
return UUID(bytes=uuid_utils.uuid8(bytes).bytes)
|
"""Generate a custom UUID comprised almost entirely of user-supplied bytes.."""
|
https://github.com/aminalaee/uuid-utils/blob/9ddd132c46278ac8aeb70474e688acec3465ce30/python/uuid_utils/compat/__init__.py#L75-L77
|
9ddd132c46278ac8aeb70474e688acec3465ce30
|
the-algorithm
|
github_2023
|
twitter
|
python
|
__init__
|
def __init__(self, calibrator_name=None, **kwargs):
'''
Arguments:
calibrator_name.
Default: if set to None it will be the same as the class name.
Please be reminded that if the model contains many calibrators
of the same type, the calibrator_name should be changed to avoid confusion.
'''
self._calibrated = False
if calibrator_name is None:
calibrator_name = twml.util.to_snake_case(self.__class__.__name__)
self._calibrator_name = calibrator_name
self._kwargs = kwargs
|
'''
Arguments:
calibrator_name.
Default: if set to None it will be the same as the class name.
Please be reminded that if the model contains many calibrators
of the same type, the calibrator_name should be changed to avoid confusion.
'''
|
https://github.com/twitter/the-algorithm/blob/72eda9a24f815f6d566818cbf8518138e29d83e9/twml/twml/contrib/calibrators/calibrator.py#L62-L74
|
72eda9a24f815f6d566818cbf8518138e29d83e9
|
Retrieval-based-Voice-Conversion-WebUI
|
github_2023
|
RVC-Project
|
python
|
_absolute_position_to_relative_position
|
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
# pad along column
x = F.pad(
x,
[0, length - 1, 0, 0, 0, 0, 0, 0],
)
x_flat = x.view([batch, heads, length*length + length * (length - 1)])
# add 0's in the beginning that will skew the elements after reshape
x_flat = F.pad(
x_flat,
[length, 0, 0, 0, 0, 0],
)
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
|
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
|
https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/7ef19867780cf703841ebafb565a4e47d1ea86ff/infer/lib/infer_pack/attentions_onnx.py#L356-L374
|
7ef19867780cf703841ebafb565a4e47d1ea86ff
|
timefold-solver
|
github_2023
|
TimefoldAI
|
python
|
SolutionManager.create
|
@staticmethod
def create(solver_factory: SolverFactory[Solution_] | SolverManager[Solution_, Any]) -> \
'SolutionManager[Solution_]':
"""
Uses a `SolverFactory` or `SolverManager` to build a SolutionManager.
Parameters
----------
solver_factory : SolverFactory | SolverManager
Returns
-------
SolutionManager
A `SolutionManager` instance.
"""
from ai.timefold.solver.core.api.solver import SolutionManager as JavaSolutionManager
return SolutionManager(JavaSolutionManager.create(solver_factory._delegate))
|
"""
Uses a `SolverFactory` or `SolverManager` to build a SolutionManager.
Parameters
----------
solver_factory : SolverFactory | SolverManager
Returns
-------
SolutionManager
A `SolutionManager` instance.
"""
|
https://github.com/TimefoldAI/timefold-solver/blob/f67c507a421ee113dd2e76f825480aa058b14767/python/python-core/src/main/python/_solution_manager.py#L29-L45
|
f67c507a421ee113dd2e76f825480aa058b14767
|
timefold-solver
|
github_2023
|
TimefoldAI
|
python
|
diff
|
def diff(self, other: 'ScoreAnalysis') -> 'ScoreAnalysis':
"""
Compare this `ScoreAnalysis` to another `ScoreAnalysis`
and retrieve the difference between them.
The comparison is in the direction of `this - other`.
Example: if `this` has a score of 100 and `other` has a score of 90,
the returned score will be 10.
If this and other were inverted, the score would have been -10.
The same applies to all other properties of `ScoreAnalysis`.
In order to properly diff `MatchAnalysis` against each other,
we rely on the user implementing `ConstraintJustification` equality correctly.
In other words, the diff will consider two justifications equal if the user says they are equal,
and it expects the hash code to be consistent with equals.
If one `ScoreAnalysis` provides `MatchAnalysis` and the other doesn't, exception is thrown.
Such `ScoreAnalysis` instances are mutually incompatible.
Parameters
----------
other : ScoreAnalysis
Returns
-------
ScoreExplanation
The `ScoreAnalysis` corresponding to the diff.
"""
return ScoreAnalysis(self._delegate.diff(other._delegate))
|
"""
Compare this `ScoreAnalysis` to another `ScoreAnalysis`
and retrieve the difference between them.
The comparison is in the direction of `this - other`.
Example: if `this` has a score of 100 and `other` has a score of 90,
the returned score will be 10.
If this and other were inverted, the score would have been -10.
The same applies to all other properties of `ScoreAnalysis`.
In order to properly diff `MatchAnalysis` against each other,
we rely on the user implementing `ConstraintJustification` equality correctly.
In other words, the diff will consider two justifications equal if the user says they are equal,
and it expects the hash code to be consistent with equals.
If one `ScoreAnalysis` provides `MatchAnalysis` and the other doesn't, exception is thrown.
Such `ScoreAnalysis` instances are mutually incompatible.
Parameters
----------
other : ScoreAnalysis
Returns
-------
ScoreExplanation
The `ScoreAnalysis` corresponding to the diff.
"""
|
https://github.com/TimefoldAI/timefold-solver/blob/f67c507a421ee113dd2e76f825480aa058b14767/python/python-core/src/main/python/score/_score_analysis.py#L631-L659
|
f67c507a421ee113dd2e76f825480aa058b14767
|
JaxMARL
|
github_2023
|
FLAIROx
|
python
|
JaxNav.reset
|
@partial(jax.jit, static_argnums=[0])
def reset(self, key: chex.PRNGKey) -> Tuple[Dict[str, chex.Array], State]:
""" Reset environment. Returns initial agent observations, states and the enviornment state """
state = self.sample_test_case(key)
obs = self._get_obs(state)
return {a: obs[i] for i, a in enumerate(self.agents)}, state
|
""" Reset environment. Returns initial agent observations, states and the enviornment state """
|
https://github.com/FLAIROx/JaxMARL/blob/3dc2cf6e002b5f1b97ce3edd45aff7e1c003f8e3/jaxmarl/environments/jaxnav/jaxnav_env.py#L202-L208
|
3dc2cf6e002b5f1b97ce3edd45aff7e1c003f8e3
|
JaxMARL
|
github_2023
|
FLAIROx
|
python
|
action_space
|
def action_space(
self, agent_id: Union[int, None] = None
) -> spaces.Discrete:
"""Action space of the environment."""
return spaces.Discrete(len(Actions))
|
"""Action space of the environment."""
|
https://github.com/FLAIROx/JaxMARL/blob/3dc2cf6e002b5f1b97ce3edd45aff7e1c003f8e3/jaxmarl/environments/storm/storm_2p.py#L921-L925
|
3dc2cf6e002b5f1b97ce3edd45aff7e1c003f8e3
|
unmasked_teacher
|
github_2023
|
OpenGVLab
|
python
|
encode_teacher
|
def encode_teacher(self, image):
"""encode image / videos as features.
Args:
image (torch.Tensor): The input images.
Returns: tuple.
- mask (torch.Tensor): Mask. Shape: [B,N1].
- clip_output (torch.Tensor): The features of clip. Shape: [K,B,N,C].
"""
B, C, T, H, W = image.shape
mask_type = self.image_mask_type if T == 1 else self.video_mask_type
window_size = self.image_window_size if T == 1 else self.video_window_size
mask_ratio = self.image_mask_ratio if T == 1 else self.video_mask_ratio
if self.clip_teacher is None or self.loss_weight.uta == 0:
return None, None
if H != self.clip_img_size:
image = torch.nn.functional.interpolate(
image.reshape(B, C*T, H, W),
size=(self.clip_img_size, self.clip_img_size),
mode='bicubic', align_corners=False
)
image = image.view(B, C, T, self.clip_img_size, self.clip_img_size)
with torch.no_grad():
if mask_type == 'tube':
mask = TubeMaskingGenerator(window_size, mask_ratio, B)
clip_output, attn = self.clip_teacher(image)
elif mask_type == 'random':
mask = RandomMaskingGenerator(window_size, mask_ratio, B)
clip_output, attn = self.clip_teacher(image)
elif mask_type in 'attention':
clip_output, attn = self.clip_teacher(image)
BT, N = attn.shape
N_vis = N - int(N * mask_ratio)
importance = torch.multinomial(attn, N)
mask = torch.ones((BT, N))
pos1 = torch.arange(BT).view(-1, 1).repeat(1, N_vis)
pos2 = importance[:, :N_vis]
mask[pos1, pos2] = 0
mask = mask.view(B, -1).to(torch.bool)
else:
raise NotImplementedError
# mask clip output
K, _, _, C = clip_output.shape
mask_clip = mask.unsqueeze(0).repeat(K, 1, 1)
clip_output = clip_output[~mask_clip].reshape(K, B, -1, C)
return mask, clip_output
|
"""encode image / videos as features.
Args:
image (torch.Tensor): The input images.
Returns: tuple.
- mask (torch.Tensor): Mask. Shape: [B,N1].
- clip_output (torch.Tensor): The features of clip. Shape: [K,B,N,C].
"""
|
https://github.com/OpenGVLab/unmasked_teacher/blob/4fb4049f5a87919882e68ccc427615ae7dab1c33/multi_modality/models/umt.py#L117-L169
|
4fb4049f5a87919882e68ccc427615ae7dab1c33
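A minimal, self-contained sketch of the attention-guided masking branch above (toy shapes and random scores; not the repo's teacher model):
import torch

BT, N, mask_ratio = 2, 8, 0.5
attn = torch.rand(BT, N).softmax(dim=-1)            # hypothetical per-patch importance scores
N_vis = N - int(N * mask_ratio)                     # number of patches kept visible
importance = torch.multinomial(attn, N)             # sample a full ranking without replacement
mask = torch.ones(BT, N, dtype=torch.bool)          # True = masked, False = visible
rows = torch.arange(BT).unsqueeze(1).expand(-1, N_vis)
mask[rows, importance[:, :N_vis]] = False           # unmask the most important patches
print(mask)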
|
YOSO
|
github_2023
|
hujiecpp
|
python
|
load_lvis_json
|
def load_lvis_json(json_file, image_root, dataset_name=None):
"""
Load a json file in LVIS's annotation format.
Args:
json_file (str): full path to the LVIS json annotation file.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
If provided, this function will put "thing_classes" into the metadata
associated with this dataset.
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from lvis import LVIS
json_file = PathManager.get_local_path(json_file)
timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
if dataset_name is not None:
meta = get_lvis_instances_meta(dataset_name)
MetadataCatalog.get(dataset_name).set(**meta)
# sort indices for reproducible results
img_ids = sorted(lvis_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = lvis_api.load_imgs(img_ids)
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images. Example of anns[0]:
# [{'segmentation': [[192.81,
# 247.09,
# ...
# 219.03,
# 249.06]],
# 'area': 1035.749,
# 'image_id': 1268,
# 'bbox': [192.81, 224.8, 74.73, 33.43],
# 'category_id': 16,
# 'id': 42986},
# ...]
anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
# Sanity check that each annotation has a unique id
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format(
json_file
)
imgs_anns = list(zip(imgs, anns))
logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file))
def get_file_name(img_root, img_dict):
# Determine the path including the split folder ("train2017", "val2017", "test2017") from
# the coco_url field. Example:
# 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
return os.path.join(img_root + split_folder, file_name)
dataset_dicts = []
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
record["file_name"] = get_file_name(image_root, img_dict)
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# This fails only when the data parsing logic or the annotation file is buggy.
assert anno["image_id"] == image_id
obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed
segm = anno["segmentation"] # list[list[float]]
# filter out invalid polygons (< 3 points)
valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
assert len(segm) == len(
valid_segm
), "Annotation contains an invalid polygon with < 3 points"
assert len(segm) > 0
obj["segmentation"] = segm
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
|
"""
Load a json file in LVIS's annotation format.
Args:
json_file (str): full path to the LVIS json annotation file.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
If provided, this function will put "thing_classes" into the metadata
associated with this dataset.
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
|
https://github.com/hujiecpp/YOSO/blob/04b898d395ffd8318aa3761b0b2b6d20b3514f26/detectron2/data/datasets/lvis.py#L40-L147
|
04b898d395ffd8318aa3761b0b2b6d20b3514f26
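A quick, runnable check of the `get_file_name` path logic used above (the `coco_url` is the one quoted in the function's own comment; the image root is illustrative):
import os

coco_url = "http://images.cocodataset.org/train2017/000000155379.jpg"
split_folder, file_name = coco_url.split("/")[-2:]
print(os.path.join("datasets/coco/" + split_folder, file_name))
# datasets/coco/train2017/000000155379.jpg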
|
FreeSeg
|
github_2023
|
bytedance
|
python
|
__call__
|
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
# assert self.is_train, "MaskFormerSemanticDatasetMapper should only be used for training!"
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if "sem_seg_file_name" in dataset_dict:
# PyTorch transformation not implemented for uint16, so converting it to double first
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype(
"double"
)
else:
sem_seg_gt = None
if sem_seg_gt is None:
raise ValueError(
"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.".format(
dataset_dict["file_name"]
)
)
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
aug_input.category_id = dataset_dict["category_id"]
aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
image = aug_input.image
sem_seg_gt = aug_input.sem_seg
# Pad image and segmentation label here!
image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
if self.size_divisibility > 0:
image_size = (image.shape[-2], image.shape[-1])
padding_size = [
0,
self.size_divisibility - image_size[1],
0,
self.size_divisibility - image_size[0],
]
image = F.pad(image, padding_size, value=128).contiguous()
if sem_seg_gt is not None:
sem_seg_gt = F.pad(
sem_seg_gt, padding_size, value=self.ignore_label
).contiguous()
image_shape = (image.shape[-2], image.shape[-1]) # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = image
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = sem_seg_gt.long()
if "annotations" in dataset_dict:
raise ValueError(
"Semantic segmentation dataset should not have 'annotations'."
)
# Prepare per-category binary masks
if sem_seg_gt is not None:
sem_seg_gt = sem_seg_gt.numpy()
instances = Instances(image_shape)
instances.gt_classes = torch.tensor(
[dataset_dict["category_id"]], dtype=torch.int64
)
masks = []
masks.append(sem_seg_gt == dataset_dict["category_id"])
if masks[0].sum() == 0:
return None
if len(masks) == 0:
# Some image does not have annotation (all ignored)
instances.gt_masks = torch.zeros(
(0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])
)
else:
masks = BitMasks(
torch.stack(
[
torch.from_numpy(np.ascontiguousarray(x.copy()))
for x in masks
]
)
)
instances.gt_masks = masks.tensor
dataset_dict["instances"] = instances
return dataset_dict
|
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
|
https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/mask_former_binary_semantic_dataset_mapper.py#L95-L194
|
7707335cc3f2a1a73d4d2829f3cdbb0e031d3961
|
Blender-GPT
|
github_2023
|
TREE-Ind
|
python
|
append_form
|
def append_form(
self,
obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]],
headers: Optional[MultiMapping[str]] = None,
) -> Payload:
"""Helper to append form urlencoded part."""
assert isinstance(obj, (Sequence, Mapping))
if headers is None:
headers = CIMultiDict()
if isinstance(obj, Mapping):
obj = list(obj.items())
data = urlencode(obj, doseq=True)
return self.append_payload(
StringPayload(
data, headers=headers, content_type="application/x-www-form-urlencoded"
)
)
|
"""Helper to append form urlencoded part."""
|
https://github.com/TREE-Ind/Blender-GPT/blob/cf8b62ebd327940ec7d1d47cc7baa7fc595ddd44/lib/aiohttp/multipart.py#L845-L864
|
cf8b62ebd327940ec7d1d47cc7baa7fc595ddd44
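A hedged usage sketch of `MultipartWriter.append_form` (field names and values are made up for illustration):
import aiohttp

with aiohttp.MultipartWriter("form-data") as mpwriter:
    mpwriter.append_form([("username", "alice"), ("lang", "en")])
    # `mpwriter` can then be passed as the `data=` argument of a client request.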
|
Otter
|
github_2023
|
EvolvingLMMs-Lab
|
python
|
generate
|
def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]):
"""Wraps original generate to enable PrefixLM attention."""
attn_modules = _get_attn_modules(model)
for attn_module in attn_modules:
attn_module.bias.data[:] = 1
output = self._original_generate(*args, **kwargs)
for attn_module in attn_modules:
attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
return output
|
"""Wraps original generate to enable PrefixLM attention."""
|
https://github.com/EvolvingLMMs-Lab/Otter/blob/1e7eb9a6fb12ef410082e796c463b99495637b85/src/otter_ai/models/mpt/hf_prefixlm_converter.py#L161-L169
|
1e7eb9a6fb12ef410082e796c463b99495637b85
|
DB-GPT
|
github_2023
|
TsinghuaDatabaseGroup
|
python
|
forward
|
def forward(self, x: torch.Tensor, x_pos: torch.Tensor):
"""
Args:
x (:obj:`torch.Tensor` of shape ``(..., dim)``): Inputs.
x_pos (:obj:`torch.Tensor` of shape ``(...)``): Positions of inputs.
"""
x_pos = x_pos * self.distance_scale
freqs = x_pos[..., None].to(self.dtype) * self.inv_freq[None, :] # (..., dim/2)
# the same implementation as sat
emb = torch.cat((freqs, freqs), dim=-1) # (..., dim)
emb_cos = emb.cos() # (..., dim)
emb_sin = emb.sin() # (..., dim)
rotate_x = torch.cat([-x[..., x.size(-1) // 2 :], x[..., : x.size(-1) // 2]], dim=-1) # (..., dim)
return x * emb_cos + rotate_x * emb_sin
|
"""
Args:
x (:obj:`torch.Tensor` of shape ``(..., dim)``): Inputs.
x_pos (:obj:`torch.Tensor` of shape ``(...)``): Positions of inputs.
"""
|
https://github.com/TsinghuaDatabaseGroup/DB-GPT/blob/0ced623935ae23b390bf7a4bb4de7fb26bbc777a/multiagents/localized_llms/cpm/layers/position_embedding.py#L218-L234
|
0ced623935ae23b390bf7a4bb4de7fb26bbc777a
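A self-contained sketch of the rotate-half trick applied above (toy dimensions; `inv_freq` uses the common 10000-base schedule rather than the repo's class attributes):
import torch

dim, pos = 8, torch.arange(4)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
freqs = pos[:, None].float() * inv_freq[None, :]            # (..., dim/2)
emb = torch.cat((freqs, freqs), dim=-1)                     # (..., dim)
x = torch.randn(4, dim)
rotate_x = torch.cat([-x[..., dim // 2:], x[..., :dim // 2]], dim=-1)
out = x * emb.cos() + rotate_x * emb.sin()
print(out.shape)                                            # torch.Size([4, 8])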
|
EA-LSS
|
github_2023
|
hht1996ok
|
python
|
__repr__
|
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(in_channels={self.in_channels}, '
repr_str += f'feat_channels={self.feat_channels}, '
repr_str += f'out_channels={self.out_channels_raw}, '
repr_str += f'input_feat_shape={self.input_feat_shape}, '
repr_str += f'act_cfg={self.act_cfg}, '
repr_str += f'norm_cfg={self.norm_cfg})'
return repr_str
|
"""str: a string that describes the module"""
|
https://github.com/hht1996ok/EA-LSS/blob/193c30141da8625f442d10f0fa29c226694bc3c3/mmdetection-2.11.0/mmdet/models/utils/transformer.py#L851-L860
|
193c30141da8625f442d10f0fa29c226694bc3c3
|
RSP
|
github_2023
|
ViTAE-Transformer
|
python
|
_bbox_forward
|
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing time"""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
|
"""Box head forward function used in both training and testing time"""
|
https://github.com/ViTAE-Transformer/RSP/blob/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/models/roi_heads/double_roi_head.py#L16-L33
|
f29818739165215d341af2ef8c20f9e2daecf128
|
Directional-Stimulus-Prompting
|
github_2023
|
Leezekun
|
python
|
add
|
def add(self, *args, action_masks: Optional[np.ndarray] = None, **kwargs) -> None:
"""
:param action_masks: Masks applied to constrain the choice of possible actions.
"""
if action_masks is not None:
self.action_masks[self.pos] = action_masks.reshape(
(self.n_envs, self.mask_dims))
super().add(*args, **kwargs)
|
"""
:param action_masks: Masks applied to constrain the choice of possible actions.
"""
|
https://github.com/Leezekun/Directional-Stimulus-Prompting/blob/93b44f9e74d608732fd7809e664cdc6c9f1f769b/rl4lms/algorithms/common/maskable/buffers.py#L72-L80
|
93b44f9e74d608732fd7809e664cdc6c9f1f769b
|
Directional-Stimulus-Prompting
|
github_2023
|
Leezekun
|
python
|
_ngram_counts
|
def _ngram_counts(sequence, order):
"""Returns count of all ngrams of given order in sequence."""
if len(sequence) < order:
return collections.Counter()
return collections.Counter(_ngrams(sequence, order))
|
"""Returns count of all ngrams of given order in sequence."""
|
https://github.com/Leezekun/Directional-Stimulus-Prompting/blob/93b44f9e74d608732fd7809e664cdc6c9f1f769b/rl4lms/data_pools/task_utils/totto/eval_utils/totto_parent_eval.py#L285-L289
|
93b44f9e74d608732fd7809e664cdc6c9f1f769b
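The `_ngrams` helper is not shown above, so this self-contained sketch defines a minimal stand-in to illustrate what `_ngram_counts` computes:
import collections

def _ngrams(sequence, order):
    # yield every contiguous n-gram of the given order
    for i in range(len(sequence) - order + 1):
        yield tuple(sequence[i:i + order])

def ngram_counts(sequence, order):
    if len(sequence) < order:
        return collections.Counter()
    return collections.Counter(_ngrams(sequence, order))

print(ngram_counts(["the", "cat", "sat", "the", "cat"], 2))
# Counter({('the', 'cat'): 2, ('cat', 'sat'): 1, ('sat', 'the'): 1})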
|
Grounded-Segment-Anything
|
github_2023
|
IDEA-Research
|
python
|
apply_image
|
def apply_image(self, image: np.ndarray) -> np.ndarray:
"""
Expects a numpy array with shape HxWxC in uint8 format.
"""
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
return np.array(resize(to_pil_image(image), target_size))
|
"""
Expects a numpy array with shape HxWxC in uint8 format.
"""
|
https://github.com/IDEA-Research/Grounded-Segment-Anything/blob/126abe633ffe333e16e4a0a4e946bc1003caf757/segment_anything/segment_anything/utils/transforms.py#L26-L31
|
126abe633ffe333e16e4a0a4e946bc1003caf757
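`get_preprocess_shape` is not shown above; this is a hedged stand-in for the longest-side resize rule it presumably implements:
def preprocess_shape(old_h, old_w, target_length):
    # scale so that the longer side equals target_length, rounding to nearest int
    scale = target_length / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)

print(preprocess_shape(480, 640, 1024))   # (768, 1024)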
|
efficientvit
|
github_2023
|
mit-han-lab
|
python
|
compute_sigma_t
|
def compute_sigma_t(self, t):
"""Compute coefficient of x0"""
p_sigma_t = 2 * self.log_mean_coeff(t)
sigma_t = th.sqrt(1 - th.exp(p_sigma_t))
d_sigma_t = th.exp(p_sigma_t) * (2 * self.d_log_mean_coeff(t)) / (-2 * sigma_t)
return sigma_t, d_sigma_t
|
"""Compute coefficient of x0"""
|
https://github.com/mit-han-lab/efficientvit/blob/b94ff779828eea399c78f626b574da2d50ef2e49/efficientvit/diffusioncore/models/sit_sampler/path.py#L162-L167
|
b94ff779828eea399c78f626b574da2d50ef2e49
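A numeric sanity check of the derivative formula above, assuming a toy log-mean coefficient mu(t) = -0.5 * t (not the repo's actual schedule):
import math

def mu(t):
    return -0.5 * t      # assumed toy log-mean coefficient

def d_mu(t):
    return -0.5          # its derivative

def sigma(t):
    return math.sqrt(1 - math.exp(2 * mu(t)))

t, eps = 0.3, 1e-6
analytic = math.exp(2 * mu(t)) * (2 * d_mu(t)) / (-2 * sigma(t))
numeric = (sigma(t + eps) - sigma(t - eps)) / (2 * eps)
print(analytic, numeric)   # the two values agree closely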
|
xhs
|
github_2023
|
ReaJason
|
python
|
create_video_note
|
def create_video_note(
self,
title,
video_path: str,
desc: str,
cover_path: str = None,
ats: list = None,
post_time: str = None,
topics: list = None,
is_private: bool = False,
wait_time: int = 3,
):
"""发布视频笔记
:param title: 笔记标题
:param video_path: 视频文件路径,目前只支持本地路径
:param desc: 笔记详情
:param cover_path: 可选,封面文件路径
:param ats: 可选,@用户信息
:param post_time: 可选,发布时间
:param topics: 可选,话题信息
:param is_private: 可选,是否私密发布
:param wait_time: 可选,默认 3 s,循环等待获取视频第一帧为笔记封面
:return:
:rtype: object
"""
if ats is None:
ats = []
if topics is None:
topics = []
file_id, token = self.get_upload_files_permit("video")
res = self.upload_file(
file_id,
token,
video_path,
content_type="video/mp4",
)
video_id, is_upload = res.headers["X-Ros-Video-Id"], False
image_id = None
if cover_path is None:
for _ in range(10):
time.sleep(wait_time)
image_id = self.get_video_first_frame_image_id(video_id)
if image_id:
break
if cover_path:
is_upload = True
image_id, token = self.get_upload_files_permit("image")
self.upload_file(image_id, token, cover_path)
cover_info = {
"file_id": image_id,
"frame": {"ts": 0, "is_user_select": False, "is_upload": is_upload},
}
video_info = {
"file_id": file_id,
"timelines": [],
"cover": cover_info,
"chapters": [],
"chapter_sync_text": False,
"entrance": "web",
}
return self.create_note(title, desc, NoteType.VIDEO.value, ats=ats, topics=topics, video_info=video_info,
post_time=post_time, is_private=is_private)
|
"""发布视频笔记
:param title: 笔记标题
:param video_path: 视频文件路径,目前只支持本地路径
:param desc: 笔记详情
:param cover_path: 可选,封面文件路径
:param ats: 可选,@用户信息
:param post_time: 可选,发布时间
:param topics: 可选,话题信息
:param is_private: 可选,是否私密发布
:param wait_time: 可选,默认 3 s,循环等待获取视频第一帧为笔记封面
:return:
:rtype: object
"""
|
https://github.com/ReaJason/xhs/blob/613036c431f1f8b68d6d4e8125e20629679bee41/xhs/core.py#L1013-L1080
|
613036c431f1f8b68d6d4e8125e20629679bee41
|
lerf
|
github_2023
|
kerrj
|
python
|
BaseImageEncoder.name
|
@abstractproperty
def name(self) -> str:
"""
returns the name of the encoder
"""
|
"""
returns the name of the encoder
"""
|
https://github.com/kerrj/lerf/blob/db08d578038d884542688511bd9ad7b489a65673/lerf/encoders/image_encoder.py#L16-L20
|
db08d578038d884542688511bd9ad7b489a65673
|
ETPNav
|
github_2023
|
MarSaKi
|
python
|
__init__
|
def __init__(self, equ_h: int, equ_w: int):
"""Args:
equ_h: (int) the height of the generated equirect
equ_w: (int) the width of the generated equirect
"""
# Cubemap input
input_projections = get_cubemap_projections(equ_h,equ_h)
# Equirectangular output
output_projection = EquirectProjection(equ_h, equ_w)
super(Cube2Equirect, self).__init__(
input_projections, output_projection
)
|
"""Args:
equ_h: (int) the height of the generated equirect
equ_w: (int) the width of the generated equirect
"""
|
https://github.com/MarSaKi/ETPNav/blob/8dec13a4e24f8bc671a3269bbcf3238793607621/habitat_extensions/obs_transformers.py#L197-L210
|
8dec13a4e24f8bc671a3269bbcf3238793607621
|
MS3D
|
github_2023
|
darrenjkt
|
python
|
get_coor_colors
|
def get_coor_colors(obj_labels):
"""
Args:
obj_labels: 1 is ground, labels > 1 indicates different instance cluster
Returns:
rgb: [N, 3]. color for each point.
"""
colors = matplotlib.colors.XKCD_COLORS.values()
max_color_num = obj_labels.max()
color_list = list(colors)[:max_color_num+1]
colors_rgba = [matplotlib.colors.to_rgba_array(color) for color in color_list]
label_rgba = np.array(colors_rgba)[obj_labels]
label_rgba = label_rgba.squeeze()[:, :3]
return label_rgba
|
"""
Args:
obj_labels: 1 is ground, labels > 1 indicates different instance cluster
Returns:
rgb: [N, 3]. color for each point.
"""
|
https://github.com/darrenjkt/MS3D/blob/ffed761a6846183966cc38d3144c90d91446fa4a/tools/visual_utils/open3d_vis_utils.py#L30-L46
|
ffed761a6846183966cc38d3144c90d91446fa4a
|
owlvit_segment_anything
|
github_2023
|
ngthanhtin
|
python
|
apply_coords
|
def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
"""
Expects a numpy array of length 2 in the final dimension. Requires the
original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.get_preprocess_shape(
original_size[0], original_size[1], self.target_length
)
coords = deepcopy(coords).astype(float)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
return coords
|
"""
Expects a numpy array of length 2 in the final dimension. Requires the
original image size in (H, W) format.
"""
|
https://github.com/ngthanhtin/owlvit_segment_anything/blob/2deca3a5d9760e6863e088db4d9a46912c6d83b9/segment_anything/segment_anything/utils/transforms.py#L33-L45
|
2deca3a5d9760e6863e088db4d9a46912c6d83b9
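A toy numeric check of the coordinate rescaling above, assuming a 1024-pixel target side for which a (480, 640) image maps to (768, 1024):
import numpy as np

old_h, old_w = 480, 640
new_h, new_w = 768, 1024
coords = np.array([[320.0, 240.0]])                       # a point at the image centre
scaled = coords * np.array([new_w / old_w, new_h / old_h])
print(scaled)                                             # [[512. 384.]]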
|
Synthetic-Voice-Detection-Vocoder-Artifacts
|
github_2023
|
csun22
|
python
|
f_train_wrapper
|
def f_train_wrapper(args, pt_model, loss_wrapper, device, \
optimizer_wrapper, \
train_dataset_wrapper, \
val_dataset_wrapper = None, \
checkpoint = None):
"""
f_train_wrapper(args, pt_model, loss_wrapper, device,
optimizer_wrapper
train_dataset_wrapper, val_dataset_wrapper = None,
check_point = None):
A wrapper to run the training process
Args:
args: argument information given by argparse
pt_model: pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
optimizer_wrapper:
a wrapper over optimizer (defined in op_manager.py)
optimizer_wrapper.optimizer is torch.optimizer
train_dataset_wrapper:
a wrapper over training data set (data_io/default_data_io.py)
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
val_dataset_wrapper:
a wrapper over validation data set (data_io/default_data_io.py)
it can be None.
check_point:
a checkpoint that stores everything needed to resume training
"""
nii_display.f_print_w_date("Start model training")
##############
## Preparation
##############
# get the optimizer
optimizer_wrapper.print_info()
optimizer = optimizer_wrapper.optimizer
lr_scheduler = optimizer_wrapper.lr_scheduler
epoch_num = optimizer_wrapper.get_epoch_num()
no_best_epoch_num = optimizer_wrapper.get_no_best_epoch_num()
# get data loader for training set
train_dataset_wrapper.print_info()
train_data_loader = train_dataset_wrapper.get_loader()
train_seq_num = train_dataset_wrapper.get_seq_num()
# get the training process monitor
monitor_trn = nii_monitor.Monitor(epoch_num, train_seq_num)
# if validation data is provided, get data loader for val set
if val_dataset_wrapper is not None:
val_dataset_wrapper.print_info()
val_data_loader = val_dataset_wrapper.get_loader()
val_seq_num = val_dataset_wrapper.get_seq_num()
monitor_val = nii_monitor.Monitor(epoch_num, val_seq_num)
else:
monitor_val = None
# training log information
train_log = ''
# prepare for DataParallism if available
# pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
flag_multi_device = True
nii_display.f_print("\nUse %d GPUs\n" % (torch.cuda.device_count()))
# no way to call normtarget_f after pt_model is in DataParallel
normtarget_f = pt_model.normalize_target
pt_model = nn.DataParallel(pt_model)
else:
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
flag_multi_device = False
normtarget_f = None
pt_model.to(device, dtype=nii_dconf.d_dtype)
# print the network
nii_nn_tools.f_model_show(pt_model)
nii_nn_tools.f_loss_show(loss_wrapper)
###############################
## Resume training if necessary
###############################
# resume training or initialize the model if necessary
cp_names = nii_nn_manage_conf.CheckPointKey()
if checkpoint is not None:
if type(checkpoint) is dict:
# checkpoint
# load model parameter and optimizer state
if cp_names.state_dict in checkpoint:
# wrap the state_dic in f_state_dict_wrapper
# in case the model is saved when DataParallel is on
pt_model.load_state_dict(
nii_nn_tools.f_state_dict_wrapper(
checkpoint[cp_names.state_dict],
flag_multi_device))
# load optimizer state
if cp_names.optimizer in checkpoint and \
not args.ignore_optimizer_statistics_in_trained_model:
optimizer.load_state_dict(checkpoint[cp_names.optimizer])
# optionally, load training history
if not args.ignore_training_history_in_trained_model:
#nii_display.f_print("Load ")
if cp_names.trnlog in checkpoint:
monitor_trn.load_state_dic(
checkpoint[cp_names.trnlog])
if cp_names.vallog in checkpoint and monitor_val:
monitor_val.load_state_dic(
checkpoint[cp_names.vallog])
if cp_names.info in checkpoint:
train_log = checkpoint[cp_names.info]
if cp_names.lr_scheduler in checkpoint and \
checkpoint[cp_names.lr_scheduler] and lr_scheduler.f_valid():
lr_scheduler.f_load_state_dict(
checkpoint[cp_names.lr_scheduler])
nii_display.f_print("Load check point, resume training")
else:
nii_display.f_print("Load pretrained model and optimizer")
else:
# only model status
pt_model.load_state_dict(
nii_nn_tools.f_state_dict_wrapper(
checkpoint, flag_multi_device))
nii_display.f_print("Load pretrained model")
######################
### User defined setup
######################
if hasattr(pt_model, "other_setups"):
nii_display.f_print("Conduct User-defined setup")
pt_model.other_setups()
# This should be merged with other_setups
if hasattr(pt_model, "g_pretrained_model_path") and \
hasattr(pt_model, "g_pretrained_model_prefix"):
nii_display.f_print("Load pret-rained models as part of this mode")
nii_nn_tools.f_load_pretrained_model_partially(
pt_model, pt_model.g_pretrained_model_path,
pt_model.g_pretrained_model_prefix)
######################
### Start training
######################
# other variables
flag_early_stopped = False
start_epoch = monitor_trn.get_epoch()
epoch_num = monitor_trn.get_max_epoch()
# print
_ = nii_op_display_tk.print_log_head()
nii_display.f_print_message(train_log, flush=True, end='')
# loop over multiple epochs
for epoch_idx in range(start_epoch, epoch_num):
# training one epoch
pt_model.train()
# set validation flag if necessary
if hasattr(pt_model, 'validation'):
pt_model.validation = False
mes = "Warning: model.validation is deprecated, "
mes += "please use model.flag_validation"
nii_display.f_print(mes, 'warning')
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = False
f_run_one_epoch(args, pt_model, loss_wrapper, device, \
monitor_trn, train_data_loader, \
epoch_idx, optimizer, normtarget_f)
time_trn = monitor_trn.get_time(epoch_idx)
loss_trn = monitor_trn.get_loss(epoch_idx)
# if necessary, do validation
if val_dataset_wrapper is not None:
# set eval() if necessary
if args.eval_mode_for_validation:
pt_model.eval()
# set validation flag if necessary
if hasattr(pt_model, 'validation'):
pt_model.validation = True
mes = "Warning: model.validation is deprecated, "
mes += "please use model.flag_validation"
nii_display.f_print(mes, 'warning')
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = True
with torch.no_grad():
f_run_one_epoch(args, pt_model, loss_wrapper, \
device, \
monitor_val, val_data_loader, \
epoch_idx, None, normtarget_f)
time_val = monitor_val.get_time(epoch_idx)
loss_val = monitor_val.get_loss(epoch_idx)
# update lr rate scheduler if necessary
if lr_scheduler.f_valid():
lr_scheduler.f_step(loss_val)
else:
time_val, loss_val = 0, 0
if val_dataset_wrapper is not None:
flag_new_best = monitor_val.is_new_best()
else:
flag_new_best = True
# print information
train_log += nii_op_display_tk.print_train_info(
epoch_idx, time_trn, loss_trn, time_val, loss_val,
flag_new_best, optimizer_wrapper.get_lr_info())
# save the best model
if flag_new_best:
tmp_best_name = nii_nn_tools.f_save_trained_name(args)
torch.save(pt_model.state_dict(), tmp_best_name)
# save intermediate model if necessary
if not args.not_save_each_epoch:
tmp_model_name = nii_nn_tools.f_save_epoch_name(args, epoch_idx)
if monitor_val is not None:
tmp_val_log = monitor_val.get_state_dic()
else:
tmp_val_log = None
if lr_scheduler.f_valid():
lr_scheduler_state = lr_scheduler.f_state_dict()
else:
lr_scheduler_state = None
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.info : train_log,
cp_names.optimizer : optimizer.state_dict(),
cp_names.trnlog : monitor_trn.get_state_dic(),
cp_names.vallog : tmp_val_log,
cp_names.lr_scheduler : lr_scheduler_state
}
torch.save(tmp_dic, tmp_model_name)
if args.verbose == 1:
nii_display.f_eprint(str(datetime.datetime.now()))
nii_display.f_eprint("Save {:s}".format(tmp_model_name),
flush=True)
# Early stopping
# note: if LR scheduler is used, early stopping will be
# disabled
if lr_scheduler.f_allow_early_stopping() and \
monitor_val is not None and \
monitor_val.should_early_stop(no_best_epoch_num):
flag_early_stopped = True
break
# loop done
nii_op_display_tk.print_log_tail()
if flag_early_stopped:
nii_display.f_print("Training finished by early stopping")
else:
nii_display.f_print("Training finished")
nii_display.f_print("Model is saved to", end = '')
nii_display.f_print("{}".format(nii_nn_tools.f_save_trained_name(args)))
return
|
"""
f_train_wrapper(args, pt_model, loss_wrapper, device,
optimizer_wrapper
train_dataset_wrapper, val_dataset_wrapper = None,
check_point = None):
A wrapper to run the training process
Args:
args: argument information given by argparse
pt_model: pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
optimizer_wrapper:
a wrapper over optimizer (defined in op_manager.py)
optimizer_wrapper.optimizer is torch.optimizer
train_dataset_wrapper:
a wrapper over training data set (data_io/default_data_io.py)
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
val_dataset_wrapper:
a wrapper over validation data set (data_io/default_data_io.py)
it can be None.
check_point:
a checkpoint that stores everything needed to resume training
"""
|
https://github.com/csun22/Synthetic-Voice-Detection-Vocoder-Artifacts/blob/f67a2714489f39eda34f6347e2617ee3a3df2a6b/core_scripts/nn_manager/nn_manager.py#L192-L470
|
f67a2714489f39eda34f6347e2617ee3a3df2a6b
|
textual-paint
|
github_2023
|
1j01
|
python
|
AnsiArtDocument.decode_based_on_file_extension
|
@staticmethod
def decode_based_on_file_extension(content: bytes, file_path: str, default_bg: str = "#ffffff", default_fg: str = "#000000") -> 'AnsiArtDocument':
"""Creates a document from the given bytes, detecting the file format.
Raises FormatReadNotSupported if the file format is not supported for reading. Some are write-only.
Raises UnicodeDecodeError, which can be a very long message, so make sure to handle it!
Raises UnidentifiedImageError if the format is not detected.
"""
format_id = AnsiArtDocument.format_from_extension(file_path)
# print("Supported image formats for reading:", Image.OPEN.keys())
# TODO: try loading as image first, then as text if that fails with UnidentifiedImageError
# That way it can handle images without file extensions.
if format_id in Image.OPEN:
return AnsiArtDocument.from_image_format(content)
elif format_id == "ANSI":
return AnsiArtDocument.from_ansi(content.decode('utf-8'), default_bg, default_fg)
elif format_id == "IRC":
return AnsiArtDocument.from_irc(content.decode('utf-8'), default_bg, default_fg)
elif format_id == "PLAINTEXT":
return AnsiArtDocument.from_plain(content.decode('utf-8'), default_bg, default_fg)
elif format_id == "SVG":
return AnsiArtDocument.from_svg(content.decode('utf-8'), default_bg, default_fg)
elif format_id in Image.SAVE or format_id in ["HTML", "RICH_CONSOLE_MARKUP"]:
# This is a write-only format.
raise FormatReadNotSupported(localized_message=_("Cannot read files saved as %1 format.", format_id))
else:
# This is an unknown format.
# For now at least, I'm preserving the behavior of loading as ANSI/PLAINTEXT.
return AnsiArtDocument.from_text(content.decode('utf-8'), default_bg, default_fg)
|
"""Creates a document from the given bytes, detecting the file format.
Raises FormatReadNotSupported if the file format is not supported for reading. Some are write-only.
Raises UnicodeDecodeError, which can be a very long message, so make sure to handle it!
Raises UnidentifiedImageError if the format is not detected.
"""
|
https://github.com/1j01/textual-paint/blob/d61de649a6a3a660d2b024e4259d99acbd45116b/src/textual_paint/ansi_art_document.py#L1033-L1061
|
d61de649a6a3a660d2b024e4259d99acbd45116b
|
fastapi_best_architecture
|
github_2023
|
fastapi-practices
|
python
|
update
|
async def update(self, db: AsyncSession, dept_id: int, obj_in: UpdateDeptParam) -> int:
"""
Update a department
:param db:
:param dept_id:
:param obj_in:
:return:
"""
return await self.update_model(db, dept_id, obj_in)
|
"""
Update a department
:param db:
:param dept_id:
:param obj_in:
:return:
"""
|
https://github.com/fastapi-practices/fastapi_best_architecture/blob/1d1a9175801291ae614d983b1e77c3455bb0839c/backend/app/admin/crud/crud_dept.py#L69-L78
|
1d1a9175801291ae614d983b1e77c3455bb0839c
|
neuralsim
|
github_2023
|
PJLab-ADG
|
python
|
enable
|
def enable(self, scene: Scene = None):
""" Actually loads the learnable params into the scene nodes attr
Args:
scene (Scene, optional): An optional target scene to load the learnable params.
If not provided, `self.scene` will be used. Defaults to None.
"""
self.is_enabled = True
scene = self.scene or scene
if self.refine_ego_motion:
for node_id, transform in self['ego_motion'].items():
node = scene.all_nodes[node_id]
# NOTE: Here, 'transform' of node.frame_data.subattr will be set to \
# the handle of the learnable 'transform' nn.Module here.
# This allows the learnable transform to be used with gradients during the rendering process,
# making the learnable param here part of the computation graph.
node.frame_data.subattr['transform'] = transform
if self.refine_other_motion:
for node_id, transform in self['other_motion'].items():
node = scene.all_nodes[node_id]
node.frame_data.subattr['transform'] = transform
if self.refine_camera_intr:
pass
if self.refine_camera_extr:
pass
if self.refine_sensor_ts:
for node_id, ts in self['sensor_ts'].items():
node = scene.all_nodes[node_id]
node.frame_data.subattr['global_ts'] = ts
|
""" Actually loads the learnable params into the scene nodes attr
Args:
scene (Scene, optional): An optional target scene to load the learnable params.
If not provided, `self.scene` will be used. Defaults to None.
"""
|
https://github.com/PJLab-ADG/neuralsim/blob/faba099e0feb11ea0089490a5e87565e25bc4a2c/app/models/scene/learnable_params.py#L194-L226
|
faba099e0feb11ea0089490a5e87565e25bc4a2c
|
AgentForge
|
github_2023
|
DataBassGit
|
python
|
TripleExtract.find_subject_predicate_object_with_chunk
|
@staticmethod
def find_subject_predicate_object_with_chunk(sentence, chunk):
"""
Extract subject, predicate, and object from a sentence, using a chunk for context.
Args:
sentence (str): The input sentence.
chunk (str): A chunk of text providing context.
Returns:
tuple: A tuple containing (subject, predicate, object), each as a string or None if not found.
Raises:
ValueError: If the input sentence or chunk is not a string or is empty.
Exception: For any other unexpected errors during processing.
"""
if not isinstance(sentence, str) or not sentence.strip():
raise ValueError("Input sentence must be a non-empty string")
if not isinstance(chunk, str) or not chunk.strip():
raise ValueError("Input chunk must be a non-empty string")
try:
doc = nlp(chunk) # Process the chunk for context
sentence_doc = nlp(sentence) # Process the sentence separately
# Identify named entities using SpaCy NER
entities = [ent for ent in doc.ents if ent.label_ in ("PERSON", "ORG")]
subject, predicate, _object = None, None, None
# Use syntactic dependency labels to identify subject and verb
for token in sentence_doc:
if token.dep_ == "nsubj" or token.dep_ == "nsubjpass":
subject = token
# Filter irrelevant words based on POS tags and additional stop words
if subject and isinstance(subject, spacy.tokens.Doc):
filtered_subject_words = [
word.text
for word in subject.words
if word.pos_ not in ["STOP", "ADP", "DET", "AUX"] # Add AUX for auxiliary verbs
]
else:
filtered_subject_words = [subject.text] if subject else None
# Join the filtered words with a space
subject_text = " ".join(filtered_subject_words) if filtered_subject_words else None
print(f"\nDEBUG CHUNK: \nFiltered subject words: {filtered_subject_words}\nSubject text: {subject_text}\n")
elif token.pos_ == "VERB":
# Check if it's part of a verb phrase indicating the predicate
if token.dep_ == "aux" and nlp(token.head.text).pos_ == "VERB":
continue # Skip auxiliary verbs
else:
predicate = token.head # Consider the head of the verb phrase as the predicate
break
# If subject not found directly, explore other possibilities
if not subject:
if predicate:
# Check for subject within relative clauses or previous entities
for child in predicate.children:
if child.dep_ == "relcl":
subject = TripleExtract.find_subject_in_clause_with_chunk(child,
entities.copy()) # Pass a copy of entities
if subject:
break
elif child.dep_ == "pobj" and len(entities) > 0:
# Check if object from previous sentence is the subject
for entity in entities:
if entity.text == child.text:
subject = entity
break
else:
# Try finding a verb phrase as the subject
for chunk in doc.noun_chunks:
if any(token.pos_ == "VERB" for token in chunk):
subject = chunk
break
# Look for candidate objects after finding subject and predicate
if subject and predicate:
for child in predicate.children:
if child.dep_ in ["dobj", "attr", "iobj"]:
for grandchild in child.children: # Iterate over child.children
if grandchild.dep_ == "pobj":
_object = grandchild
break # Stop iterating after finding the object
elif child.dep_ == "prep":
for grandchild in child.children:
if grandchild.dep_ == "pobj":
_object = grandchild
break # Stop iterating after finding the object
# Convert identified tokens to text if they exist
subject_text = subject.text if subject else None
predicate_text = predicate.lemma_ if predicate else None # Using lemma for base form of verb
object_text = _object.text if _object else None
print(
f"usingContext:\nSubject: {subject_text}\nPredicate: {predicate_text}\nObject: {object_text}")
return subject_text, predicate_text, object_text
except Exception as e:
raise Exception(f"An error occurred while processing the sentence with chunk: {str(e)}")
|
"""
Extract subject, predicate, and object from a sentence, using a chunk for context.
Args:
sentence (str): The input sentence.
chunk (str): A chunk of text providing context.
Returns:
tuple: A tuple containing (subject, predicate, object), each as a string or None if not found.
Raises:
ValueError: If the input sentence or chunk is not a string or is empty.
Exception: For any other unexpected errors during processing.
"""
|
https://github.com/DataBassGit/AgentForge/blob/feabdd0febe7172e1b99b5dcacc1f7138847f7e3/src/agentforge/tools/triple_extract.py#L137-L241
|
feabdd0febe7172e1b99b5dcacc1f7138847f7e3
|
phasellm
|
github_2023
|
wgryc
|
python
|
test_complete_chat_sse_kwargs
|
def test_complete_chat_sse_kwargs(self):
"""
Tests that the StreamingClaudeWrapper can be used to perform streaming chat completion with kwargs.
"""
fixture = StreamingClaudeWrapper(anthropic_api_key, model="claude-v1", format_sse=True, append_stop_token=False,
temperature=0.9, top_k=2)
test_streaming_complete_chat_sse(self, fixture, check_stop=False, verbose=False)
|
"""
Tests that the StreamingClaudeWrapper can be used to perform streaming chat completion with kwargs.
"""
|
https://github.com/wgryc/phasellm/blob/974d026dc649e4a71da4c25bf8c934622e56cf5d/tests/e2e/llms/test_e2e_llms.py#L624-L631
|
974d026dc649e4a71da4c25bf8c934622e56cf5d
|
Prompt-Segment-Anything
|
github_2023
|
RockeyCoss
|
python
|
get_rel_pos
|
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
|
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
|
https://github.com/RockeyCoss/Prompt-Segment-Anything/blob/5d1704db7489e79d4cd2a6eed99b7a39d8d5acf0/projects/instance_segment_anything/models/segment_anything/modeling/image_encoder.py#L292-L322
|
5d1704db7489e79d4cd2a6eed99b7a39d8d5acf0
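A toy walk-through of the relative-coordinate indexing above for equal query/key sizes:
import torch

q_size = k_size = 3
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
print(relative_coords.long())
# tensor([[2, 1, 0],
#         [3, 2, 1],
#         [4, 3, 2]])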
|
HumanSD
|
github_2023
|
IDEA-Research
|
python
|
join_path
|
def join_path(self, filepath: Union[str, Path],
*filepaths: Union[str, Path]) -> str:
"""Concatenate all file paths.
Join one or more filepath components intelligently. The return value
is the concatenation of filepath and any members of *filepaths.
Args:
filepath (str or Path): Path to be concatenated.
Returns:
str: The result of concatenation.
"""
return self.client.join_path(filepath, *filepaths)
|
"""Concatenate all file paths.
Join one or more filepath components intelligently. The return value
is the concatenation of filepath and any members of *filepaths.
Args:
filepath (str or Path): Path to be concatenated.
Returns:
str: The result of concatenation.
"""
|
https://github.com/IDEA-Research/HumanSD/blob/c5db29dd66a3e40afa8b4bed630f0aa7ea001880/comparison_models/ControlNet/annotator/uniformer/mmcv/fileio/file_client.py#L1079-L1092
|
c5db29dd66a3e40afa8b4bed630f0aa7ea001880
|
samm
|
github_2023
|
bingogome
|
python
|
normalize
|
def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
"""Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std."""
return TF.normalize(x, mean, std, inplace=inplace)
|
"""Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std."""
|
https://github.com/bingogome/samm/blob/ee627cd5ad43d65d57182a7a1ae0fca3e51a79fd/samm-python-terminal/thirdparty/MedicalSAMAdapter/models/MobileSAMv2/ultralytics/yolo/data/dataloaders/v5augmentations.py#L59-L61
|
ee627cd5ad43d65d57182a7a1ae0fca3e51a79fd
|
EasyDeL
|
github_2023
|
erfanzar
|
python
|
ring_attention_tpu
|
@partial(
jax.custom_vjp,
nondiff_argnums=[6, 7, 8, 9, 10, 11],
)
def ring_attention_tpu(
query: chex.Array,
key: chex.Array,
value: chex.Array,
bias: tp.Optional[chex.Array] = None,
segment_ids: tp.Optional[SegmentIds] = None,
cache_idx: tp.Optional[int] = None,
axis_name: tp.Optional[str] = None,
float32_logits: bool = True,
softmax_scale: tp.Optional[float] = None,
blocksize_q: int = 256,
blocksize_k: int = 256,
blocksize_c: tp.Optional[int] = None,
) -> chex.Array:
"""Computes ring attention using FlashAttention on TPU.
Args:
query: Query array of shape (batch, query_len, num_heads, dim_per_head).
key: Key array of shape (batch, kv_len, num_heads, dim_per_head).
value: Value array of shape (batch, kv_len, num_heads, dim_per_head).
bias: tp.Optional bias array. Its shape depends on the attention mechanism.
segment_ids: tp.Optional segment ids for Q and KV sequences.
cache_idx: tp.Optional cache index for use with caching.
axis_name: tp.Optional name of the axis to ppermute over (for multi-host support).
float32_logits: Whether to compute logits in float32.
softmax_scale: tp.Optional scaling factor for the softmax function.
blocksize_q: Block size for the query sequence.
blocksize_k: Block size for the key/value sequence.
blocksize_c: tp.Optional block size for causal masking.
Returns:
Output array of shape (batch, query_len, num_heads, dim_per_head).
"""
y, _ = _ring_flash_attention_fwd_tpu(
query,
key,
value,
bias,
segment_ids,
cache_idx,
axis_name,
float32_logits,
softmax_scale,
blocksize_q,
blocksize_k,
blocksize_c,
)
return y
|
"""Computes ring attention using FlashAttention on TPU.
Args:
query: Query array of shape (batch, query_len, num_heads, dim_per_head).
key: Key array of shape (batch, kv_len, num_heads, dim_per_head).
value: Value array of shape (batch, kv_len, num_heads, dim_per_head).
bias: tp.Optional bias array. Its shape depends on the attention mechanism.
segment_ids: tp.Optional segment ids for Q and KV sequences.
cache_idx: tp.Optional cache index for use with caching.
axis_name: tp.Optional name of the axis to ppermute over (for multi-host support).
float32_logits: Whether to compute logits in float32.
softmax_scale: tp.Optional scaling factor for the softmax function.
blocksize_q: Block size for the query sequence.
blocksize_k: Block size for the key/value sequence.
blocksize_c: tp.Optional block size for causal masking.
Returns:
Output array of shape (batch, query_len, num_heads, dim_per_head).
"""
|
https://github.com/erfanzar/EasyDeL/blob/104bb42a9cf23050382a53392b677dc4d4b8d579/easydel/kernels/tpu_ops/pallas_ring_attention.py#L323-L375
|
104bb42a9cf23050382a53392b677dc4d4b8d579
|
EasyDeL
|
github_2023
|
erfanzar
|
python
|
pack_sequences
|
def pack_sequences(
dataset: Dataset,
max_length: int = 512,
pad_token_id: int = 0,
reset_position_ids: bool = False,
num_proc: tp.Optional[int] = None,
):
"""
Pack sequences together with their attention masks and position IDs
# With continuous position IDs
packed_dataset = pack_sequences(
dataset,
max_length=512,
pad_token_id=0,
reset_position_ids=False
)
# With reset position IDs for each sequence
packed_dataset = pack_sequences(
dataset,
max_length=512,
pad_token_id=0,
reset_position_ids=True
)
# Example output format for a packed sequence with two sequences:
# reset_position_ids=False:
{
'input_ids': [seq1_tokens + [PAD] + seq2_tokens + [PAD] + padding],
'attention_mask': [1,1,1,0,1,1,1,0,0,0],
'position_ids': [0,1,2,3,4,5,6,7,0,0]
}
# reset_position_ids=True:
{
'input_ids': [seq1_tokens + [PAD] + seq2_tokens + [PAD] + padding],
'attention_mask': [1,1,1,0,1,1,1,0,0,0],
'position_ids': [0,1,2,0,0,1,2,0,0,0]
}
Args:
dataset: Dataset containing 'input_ids' and 'attention_mask'
max_length: Maximum length of packed sequence
pad_token_id: Token ID used for padding
reset_position_ids: If True, reset position IDs for each sequence in the pack
Returns:
packed_dataset: Dataset with packed sequences, attention masks, and position IDs
"""
def pack_examples(examples):
current_packed_input_ids = []
current_packed_attention_mask = []
current_packed_position_ids = []
current_length = 0
packed_input_ids = []
packed_attention_mask = []
packed_position_ids = []
def get_position_ids(length, start_position=0):
if reset_position_ids:
return list(range(length))
else:
return list(range(start_position, start_position + length))
# Iterate through all examples
for input_ids, attention_mask in zip(
examples["input_ids"], examples["attention_mask"]
):
seq_length = len(input_ids)
# If adding this sequence would exceed max_length, start a new packed sequence
if current_length + seq_length + 1 > max_length:
# Pad the current packed sequence if needed
if current_length < max_length:
padding_length = max_length - current_length
current_packed_input_ids.extend([pad_token_id] * padding_length)
current_packed_attention_mask.extend([0] * padding_length)
current_packed_position_ids.extend([0] * padding_length)
# Add the completed packed sequence to results
packed_input_ids.append(current_packed_input_ids)
packed_attention_mask.append(current_packed_attention_mask)
packed_position_ids.append(current_packed_position_ids)
# Start new packed sequence
current_packed_input_ids = []
current_packed_attention_mask = []
current_packed_position_ids = []
current_length = 0
# Generate position IDs for current sequence
position_ids = get_position_ids(seq_length, start_position=current_length)
# Add current sequence
current_packed_input_ids.extend(input_ids)
current_packed_attention_mask.extend(attention_mask)
current_packed_position_ids.extend(position_ids)
# Add separator token
current_packed_input_ids.append(pad_token_id)
current_packed_attention_mask.append(0)
current_packed_position_ids.append(
position_ids[-1] + 1 if not reset_position_ids else 0
)
current_length += seq_length + 1
# Handle the last packed sequence
if current_packed_input_ids:
# Pad if needed
if current_length < max_length:
padding_length = max_length - current_length
current_packed_input_ids.extend([pad_token_id] * padding_length)
current_packed_attention_mask.extend([0] * padding_length)
current_packed_position_ids.extend([0] * padding_length)
packed_input_ids.append(current_packed_input_ids)
packed_attention_mask.append(current_packed_attention_mask)
packed_position_ids.append(current_packed_position_ids)
return {
"input_ids": packed_input_ids,
"attention_mask": packed_attention_mask,
"position_ids": packed_position_ids,
}
# Process the dataset in batches
packed_dataset = dataset.map(
pack_examples,
batched=True,
remove_columns=dataset.column_names,
desc="Packing sequences",
num_proc=num_proc,
)
return packed_dataset
|
"""
Pack sequences together with their attention masks and position IDs
# With continuous position IDs
packed_dataset = pack_sequences(
dataset,
max_length=512,
pad_token_id=0,
reset_position_ids=False
)
# With reset position IDs for each sequence
packed_dataset = pack_sequences(
dataset,
max_length=512,
pad_token_id=0,
reset_position_ids=True
)
# Example output format for a packed sequence with two sequences:
# reset_position_ids=False:
{
'input_ids': [seq1_tokens + [PAD] + seq2_tokens + [PAD] + padding],
'attention_mask': [1,1,1,0,1,1,1,0,0,0],
'position_ids': [0,1,2,3,4,5,6,7,0,0]
}
# reset_position_ids=True:
{
'input_ids': [seq1_tokens + [PAD] + seq2_tokens + [PAD] + padding],
'attention_mask': [1,1,1,0,1,1,1,0,0,0],
'position_ids': [0,1,2,0,0,1,2,0,0,0]
}
Args:
dataset: Dataset containing 'input_ids' and 'attention_mask'
max_length: Maximum length of packed sequence
pad_token_id: Token ID used for padding
reset_position_ids: If True, reset position IDs for each sequence in the pack
Returns:
packed_dataset: Dataset with packed sequences, attention masks, and position IDs
"""
|
https://github.com/erfanzar/EasyDeL/blob/104bb42a9cf23050382a53392b677dc4d4b8d579/easydel/trainers/packer.py#L11-L149
|
104bb42a9cf23050382a53392b677dc4d4b8d579
|
vosk-tts
|
github_2023
|
alphacep
|
python
|
solve_heun
|
def solve_heun(self, x, t_span, mu, mask, spks, cond, training=False, guidance_scale=0.0):
"""
Fixed-step Heun solver for ODEs.
Args:
x (torch.Tensor): random noise
t_span (torch.Tensor): n_timesteps interpolated
shape: (n_timesteps + 1,)
mu (torch.Tensor): output of encoder
shape: (batch_size, n_feats, mel_timesteps)
mask (torch.Tensor): output_mask
shape: (batch_size, 1, mel_timesteps)
cond: Not used but kept for future purposes
"""
t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
#-! : reserved space for debugger
sol = []
steps = 1
while steps <= len(t_span) - 1:
dphi_dt = self.func_dphi_dt(x, mask, mu, t, spks, cond, training=training, guidance_scale=guidance_scale)
dphi_dt_2 = self.func_dphi_dt(x + dt * dphi_dt, mask, mu, t+dt, spks, cond, training=training, guidance_scale=guidance_scale)
#- Euler's -> Y'n+1' = Y'n' + h * F(X'n', Y'n')
# x = x + dt * dphi_dt
#- Heun's -> Y'n+1' = Y'n' + h * 0.5( F(X'n', Y'n') + F(X'n' + h, Y'n' + h * F(X'n', Y'n') ) )
x = x + dt * 0.5 * (dphi_dt + dphi_dt_2)
t = t + dt
sol.append(x)
if steps < len(t_span) - 1:
dt = t_span[steps + 1] - t
steps += 1
return sol[-1]
|
"""
Fixed-step Heun solver for ODEs.
Args:
x (torch.Tensor): random noise
t_span (torch.Tensor): n_timesteps interpolated
shape: (n_timesteps + 1,)
mu (torch.Tensor): output of encoder
shape: (batch_size, n_feats, mel_timesteps)
mask (torch.Tensor): output_mask
shape: (batch_size, 1, mel_timesteps)
cond: Not used but kept for future purposes
"""
|
https://github.com/alphacep/vosk-tts/blob/89b23a8b033133e25e3e7f53d07939645b8ea51c/training/stabletts/matcha/models/components/flow_matching.py#L91-L126
|
89b23a8b033133e25e3e7f53d07939645b8ea51c
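A self-contained sketch of the fixed-step Heun update used above, applied to the toy ODE dy/dt = -y whose exact solution is exp(-t):
import math

def heun(f, y, t0, t1, steps):
    dt = (t1 - t0) / steps
    t = t0
    for _ in range(steps):
        k1 = f(t, y)                    # Euler slope
        k2 = f(t + dt, y + dt * k1)     # slope at the Euler prediction
        y = y + dt * 0.5 * (k1 + k2)    # Heun update: average the two slopes
        t += dt
    return y

print(heun(lambda t, y: -y, 1.0, 0.0, 1.0, 10), math.exp(-1.0))   # ~0.3685 vs ~0.3679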
|
discord-ext-voice-recv
|
github_2023
|
imayhaveborkedit
|
python
|
reset
|
def reset(self) -> None:
"""
Clear buffer and reset internal counters.
"""
self._buffer.clear()
self._has_item.clear()
self._prefill = self.prefill
self._last_tx = self._last_rx = self._generation = self._generation_ts = 0
|
"""
Clear buffer and reset internal counters.
"""
|
https://github.com/imayhaveborkedit/discord-ext-voice-recv/blob/3398a4d9d2f646cfcd60f68e626cd750b759893f/discord/ext/voice_recv/buffer.py#L202-L210
|
3398a4d9d2f646cfcd60f68e626cd750b759893f
|
caikit
|
github_2023
|
caikit
|
python
|
has_data_stream
|
def has_data_stream(arg_type: Type) -> bool:
"""Recursive check for a DataStream container in a type annotation"""
if _is_data_stream(arg_type):
return True
typing_args = get_args(arg_type)
if len(typing_args) > 0:
for typ in typing_args:
if has_data_stream(typ):
return True
return False
|
"""Recursive check for a DataStream container in a type annotation"""
|
https://github.com/caikit/caikit/blob/ce3fa2c129ce15a5e2095d466a8f01ec2e0c577d/caikit/runtime/service_generation/type_helpers.py#L26-L37
|
ce3fa2c129ce15a5e2095d466a8f01ec2e0c577d
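A hedged analogue of the recursive annotation check above, using the built-in `list` as the target container instead of caikit's DataStream:
from typing import List, Optional, Union, get_args

def mentions(arg_type, target) -> bool:
    # direct match, or generic alias whose origin is the target container
    if arg_type is target or getattr(arg_type, "__origin__", None) is target:
        return True
    # otherwise recurse into the annotation's type arguments
    return any(mentions(t, target) for t in get_args(arg_type))

print(mentions(Optional[Union[int, List[str]]], list))   # True
print(mentions(Optional[int], list))                      # False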
|
DB-GPT
|
github_2023
|
eosphoros-ai
|
python
|
AppResource.app_name
|
@property
def app_name(self):
"""Return the app name."""
return self._app_name
|
"""Return the app name."""
|
https://github.com/eosphoros-ai/DB-GPT/blob/0310ce9fa333f14954bed4c4994da5ef419c27c7/dbgpt/agent/resource/app.py#L99-L102
|
0310ce9fa333f14954bed4c4994da5ef419c27c7
|
DB-GPT
|
github_2023
|
eosphoros-ai
|
python
|
Document.langchain2doc
|
@classmethod
def langchain2doc(cls, document):
"""Transform Langchain to Document format."""
metadata = document.metadata or {}
return cls(content=document.page_content, metadata=metadata)
|
"""Transform Langchain to Document format."""
|
https://github.com/eosphoros-ai/DB-GPT/blob/0310ce9fa333f14954bed4c4994da5ef419c27c7/dbgpt/core/interface/knowledge.py#L28-L32
|
0310ce9fa333f14954bed4c4994da5ef419c27c7
|
DB-GPT
|
github_2023
|
eosphoros-ai
|
python
|
get_table_summary
|
def get_table_summary(self, table_name):
"""Get table summary for table.
example:
table_name(column1(column1 comment),column2(column2 comment),
column3(column3 comment) and index keys, and table comment: {table_comment})
"""
return _parse_table_summary(self.db, self.summary_template, table_name)
|
"""Get table summary for table.
example:
table_name(column1(column1 comment),column2(column2 comment),
column3(column3 comment) and index keys, and table comment: {table_comment})
"""
|
https://github.com/eosphoros-ai/DB-GPT/blob/0310ce9fa333f14954bed4c4994da5ef419c27c7/dbgpt/rag/summary/rdbms_db_summary.py#L52-L59
|
0310ce9fa333f14954bed4c4994da5ef419c27c7
|
DB-GPT
|
github_2023
|
eosphoros-ai
|
python
|
__init__
|
def __init__(self):
"""Client for vis protocol."""
self._vis_tag: Dict[str, Vis] = {}
|
"""Client for vis protocol."""
|
https://github.com/eosphoros-ai/DB-GPT/blob/0310ce9fa333f14954bed4c4994da5ef419c27c7/dbgpt/vis/client.py#L18-L20
|
0310ce9fa333f14954bed4c4994da5ef419c27c7
|
DB-GPT
|
github_2023
|
eosphoros-ai
|
python
|
bin_search
|
async def bin_search(
self, blocks: List[str], model_nam: str, max_new_token: int
) -> int:
"""Binary search to find the split point."""
l, r = 0, len(blocks) - 1
while l < r:
mid = l + r + 1 >> 1
current_tokens = await self._llm_client.count_token(
model_nam, "".join(blocks[: mid + 1])
)
if current_tokens <= max_new_token:
l = mid
else:
r = mid - 1
return r
|
"""Binary search to find the split point."""
|
https://github.com/eosphoros-ai/DB-GPT/blob/0310ce9fa333f14954bed4c4994da5ef419c27c7/i18n/translate_util.py#L300-L314
|
0310ce9fa333f14954bed4c4994da5ef419c27c7
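A synchronous sketch of the same upper-mid binary search, using string length as a stand-in for the async token count above:
def bin_search_sync(blocks, max_len):
    l, r = 0, len(blocks) - 1
    while l < r:
        mid = (l + r + 1) >> 1                            # upper mid to avoid an infinite loop
        if len("".join(blocks[: mid + 1])) <= max_len:
            l = mid
        else:
            r = mid - 1
    return r

print(bin_search_sync(["ab", "cd", "ef", "gh"], 5))   # 1 -> "abcd" fits, "abcdef" does not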
|
OpenAdapt
|
github_2023
|
OpenAdaptAI
|
python
|
stop_recording
|
def stop_recording(self) -> None:
"""Stop recording."""
Thread(target=stop_record).start()
|
"""Stop recording."""
|
https://github.com/OpenAdaptAI/OpenAdapt/blob/acdbb7b2236fcbb6f8da8e0162d394608d49d33e/openadapt/app/tray.py#L255-L257
|
acdbb7b2236fcbb6f8da8e0162d394608d49d33e
|
Image2Paragraph
|
github_2023
|
showlab
|
python
|
get_detection_dataset_dicts
|
def get_detection_dataset_dicts(
names,
filter_empty=True,
min_keypoints=0,
proposal_files=None,
check_consistency=True,
):
"""
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
Args:
names (str or list[str]): a dataset name or a list of dataset names
filter_empty (bool): whether to filter out images without instance annotations
min_keypoints (int): filter out images with fewer keypoints than
`min_keypoints`. Set to 0 to do nothing.
proposal_files (list[str]): if given, a list of object proposal files
that match each dataset in `names`.
check_consistency (bool): whether to check if datasets have consistent metadata.
Returns:
list[dict]: a list of dicts following the standard dataset dict format.
"""
if isinstance(names, str):
names = [names]
assert len(names), names
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
for dataset_name, dicts in zip(names, dataset_dicts):
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
if proposal_files is not None:
assert len(names) == len(proposal_files)
# load precomputed proposals from proposal files
dataset_dicts = [
load_proposals_into_dataset(dataset_i_dicts, proposal_file)
for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
]
if isinstance(dataset_dicts[0], torchdata.Dataset):
return torchdata.ConcatDataset(dataset_dicts)
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
has_instances = "annotations" in dataset_dicts[0]
if filter_empty and has_instances:
dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
if min_keypoints > 0 and has_instances:
dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
if check_consistency and has_instances:
try:
class_names = MetadataCatalog.get(names[0]).thing_classes
check_metadata_consistency("thing_classes", names)
print_instances_class_histogram(dataset_dicts, class_names)
except AttributeError: # class names are not available for this dataset
pass
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
return dataset_dicts
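A hedged usage sketch of the loader above: it assumes detectron2 is installed, registers a tiny in-memory dataset under an illustrative name, and then loads it back through the helper. Names, file paths, and the class label are illustrative only.

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode

def _tiny_dataset():
    # One image with a single non-crowd annotation, in the standard dataset-dict format.
    return [{
        "file_name": "img_0.jpg", "image_id": 0, "height": 480, "width": 640,
        "annotations": [{"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS,
                         "category_id": 0, "iscrowd": 0}],
    }]

DatasetCatalog.register("tiny_train", _tiny_dataset)
MetadataCatalog.get("tiny_train").thing_classes = ["widget"]
dicts = get_detection_dataset_dicts(["tiny_train"], filter_empty=True)
print(len(dicts))  # 1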
|
"""
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
Args:
names (str or list[str]): a dataset name or a list of dataset names
filter_empty (bool): whether to filter out images without instance annotations
min_keypoints (int): filter out images with fewer keypoints than
`min_keypoints`. Set to 0 to do nothing.
proposal_files (list[str]): if given, a list of object proposal files
that match each dataset in `names`.
check_consistency (bool): whether to check if datasets have consistent metadata.
Returns:
list[dict]: a list of dicts following the standard dataset dict format.
"""
|
https://github.com/showlab/Image2Paragraph/blob/a24210a6dd4535a1a43af7a6170fb8ad1e3a6013/models/grit_src/third_party/CenterNet2/detectron2/data/build.py#L216-L273
|
a24210a6dd4535a1a43af7a6170fb8ad1e3a6013
|
scikit-fingerprints
|
github_2023
|
scikit-fingerprints
|
python
|
number_of_rotatable_bonds
|
def number_of_rotatable_bonds(mol: Mol) -> int:
"""
Number of rotatable bonds.
Calculates the total number of rotatable bonds in the molecule.
Parameters
----------
mol : RDKit ``Mol`` object
The molecule for which the number of rotatable bonds is to be calculated.
Examples
--------
>>> from rdkit.Chem import MolFromSmiles
>>> from skfp.descriptors import number_of_rotatable_bonds
>>> mol = MolFromSmiles("C=CC=C") # Butadiene
>>> number_of_rotatable_bonds(mol)
1
"""
return CalcNumRotatableBonds(mol)
|
"""
Number of rotatable bonds.
Calculates the total number of rotatable bonds in the molecule.
Parameters
----------
mol : RDKit ``Mol`` object
The molecule for which the number of rotatable bonds is to be calculated.
Examples
--------
>>> from rdkit.Chem import MolFromSmiles
>>> from skfp.descriptors import number_of_rotatable_bonds
>>> mol = MolFromSmiles("C=CC=C") # Butadiene
>>> number_of_rotatable_bonds(mol)
1
"""
|
https://github.com/scikit-fingerprints/scikit-fingerprints/blob/bf4faa208aa115873b138f20543800679bc59da2/skfp/descriptors/constitutional.py#L193-L212
|
bf4faa208aa115873b138f20543800679bc59da2
|
CVPR2023-UniDistill
|
github_2023
|
megvii-research
|
python
|
_reg_loss
|
def _reg_loss(self, regr, gt_regr, mask):
"""L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).clone().float()
isnotnan = (~torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
num = reduce_mean(num)
loss = loss / (num + 1e-4)
return loss
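A minimal, self-contained sketch of the same masked L1 idea on dummy tensors. The distributed-aware reduce_mean from the original is omitted, and torch.nan_to_num is added so that NaN targets (which the mask already zeroes) cannot propagate through the subtraction.

import torch

def masked_l1(regr, gt_regr, mask):
    num = mask.float().sum()
    mask = mask.unsqueeze(2).expand_as(gt_regr).float()
    mask = mask * (~torch.isnan(gt_regr)).float()
    diff = torch.abs(regr * mask - torch.nan_to_num(gt_regr) * mask)
    loss = diff.transpose(2, 0).contiguous().sum(dim=2).sum(dim=1)
    return loss / (num + 1e-4)

regr = torch.zeros(2, 3, 4)                       # batch x max_objects x dim
gt_regr = torch.ones(2, 3, 4)
mask = torch.tensor([[1, 1, 0], [1, 0, 0]])       # 3 valid objects in total
print(masked_l1(regr, gt_regr, mask))             # roughly 1.0 per regression dimension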
|
"""L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
|
https://github.com/megvii-research/CVPR2023-UniDistill/blob/32f02b4304cdf435b83b2265f59fdfed2710c3a5/unidistill/layers/losses/det3d.py#L394-L416
|
32f02b4304cdf435b83b2265f59fdfed2710c3a5
|
h2o-llmstudio
|
github_2023
|
h2oai
|
python
|
run_train
|
def run_train(
cfg: DefaultConfigProblemBase,
model: torch.nn.Module,
optimizer,
scheduler,
epoch_steps,
train_dataloader,
val_dataloader,
val_df: pd.DataFrame,
):
"""Runs the training loop.
Args:
cfg: DefaultConfigProblemBase config object
model: model
train_dataloader: custom training Dataloader
train_df: train DataFrame
val_dataloader: custom validation Dataloader
val_df: validation DataFrame
Returns:
Validation prediction output
Validation loss
Validation metric
Last train batch
"""
if (
hasattr(cfg.augmentation, "neftune_noise_alpha")
and cfg.augmentation.neftune_noise_alpha > 0
):
activate_neftune(model, cfg.augmentation.neftune_noise_alpha)
scaler: GradScaler | None = None
if cfg.environment.mixed_precision:
scaler = GradScaler(
enabled=(cfg.environment.mixed_precision_dtype == "float16")
)
optimizer.zero_grad(set_to_none=True)
# Prepare NLP Augmentation
nlp_augment = None
if hasattr(cfg.augmentation, "nlp_augmentations_class"):
nlp_augment = cfg.augmentation.nlp_augmentations_class(cfg=cfg)
start_epoch = 0
_, metric_mode, _ = cfg.prediction.metric_class.get(cfg.prediction.metric)
objective_op: Callable[[float, float], bool]
if metric_mode == "max":
best_val_metric = -np.inf
objective_op = np.greater
else:
best_val_metric = np.inf
objective_op = np.less
if cfg.training.evaluate_before_training:
val_loss, val_metric = run_eval(
cfg=cfg, model=model, val_dataloader=val_dataloader, val_df=val_df
)
for epoch in range(start_epoch, cfg.training.epochs):
set_seed(
cfg.environment._seed
+ epoch * cfg.environment._world_size * cfg.environment.number_of_workers
+ cfg.environment._local_rank * cfg.environment.number_of_workers
)
logger.info(f"Training Epoch: {epoch + 1} / {cfg.training.epochs}")
if (
cfg.environment._distributed
and not cfg.environment.use_deepspeed
and hasattr(train_dataloader.sampler, "set_epoch")
):
train_dataloader.sampler.set_epoch(epoch) # type: ignore
tqdm_out = TqdmToLogger(logger, level=logging.INFO)
progress_bar = tqdm(
total=epoch_steps,
disable=cfg.environment._local_rank != 0,
file=tqdm_out,
ascii=True,
desc="train loss",
mininterval=0,
)
tr_it = iter(train_dataloader)
losses = []
model.train()
log_update_steps = max(epoch_steps // 20, 1)
evaluation_step = max(int(epoch_steps * cfg.training.evaluation_epochs), 1)
logger.info(f"Evaluation step: {evaluation_step}")
for itr, data in enumerate(tr_it):
cfg.environment._curr_step += (
cfg.training.batch_size * cfg.environment._world_size
)
# Batch to device
batch = cfg.dataset.dataset_class.batch_to_device(
data, cfg.environment._device
)
# NLP augmentation
if nlp_augment is not None:
batch = nlp_augment(batch)
# Plot first batch
if epoch == 0 and itr == 0 and cfg.environment._local_rank == 0:
plot = cfg.logging.plots_class.plot_batch(batch=batch, cfg=cfg)
log_plot(cfg, plot, "train_data")
# only need to sync gradients at last step of grad accumulation
model.require_backward_grad_sync = itr % cfg.training.grad_accumulation == 0
# Forward pass
with autocast(
enabled=cfg.environment.mixed_precision,
dtype=get_torch_dtype(cfg.environment.mixed_precision_dtype),
):
output_dict = model.forward(batch)
loss = output_dict["loss"]
if ~np.isfinite(loss.item()) and (epoch > start_epoch or itr > 20):
raise LLMTrainingException(
"NaN caught in loss during training. "
"Please, reduce learning rate, change dtype, "
"or disable mixed precision. Alternatively, "
"gradient clipping may help to stabilize training."
)
losses.append(loss.item())
# loss is a mean loss per batch/sample
# as grad_accumulations sums up the gradients, this loss must be scaled
# by the number of grad_accumulations, to have similar behavior for
# BS * grad_accumulations = const.
if cfg.training.grad_accumulation != 1:
loss = loss / cfg.training.grad_accumulation
# Backward pass
if (
cfg.environment.mixed_precision
and len(cfg.environment.gpus)
and not cfg.environment.use_deepspeed
):
scaler.scale(loss).backward() # type: ignore
if itr % cfg.training.grad_accumulation == 0:
if cfg.training.gradient_clip > 0:
scaler.unscale_(optimizer) # type: ignore
torch.nn.utils.clip_grad_norm_(
model.parameters(), cfg.training.gradient_clip
)
scaler.step(optimizer) # type: ignore
scaler.update()
optimizer.zero_grad(set_to_none=True)
else:
if cfg.environment.use_deepspeed:
model.backward(loss) # type: ignore[operator]
else:
loss.backward()
if itr % cfg.training.grad_accumulation == 0:
if cfg.training.gradient_clip > 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), cfg.training.gradient_clip
)
optimizer.step()
optimizer.zero_grad(set_to_none=True)
if cfg.environment._distributed:
torch.cuda.synchronize(device=cfg.environment._local_rank)
if scheduler is not None:
scheduler.step()
if cfg.environment._local_rank == 0:
cfg.logging._logger.log(
"train",
"loss",
losses[-1],
step=cfg.environment._curr_step
/ cfg.environment._step_log_denominator,
)
cfg.logging._logger.log(
"meta",
"lr",
optimizer.param_groups[0]["lr"],
step=cfg.environment._curr_step
/ cfg.environment._step_log_denominator,
)
if cfg.training.differential_learning_rate_layers:
cfg.logging._logger.log(
"meta",
"lr_diff",
optimizer.param_groups[2]["lr"],
step=cfg.environment._curr_step
/ cfg.environment._step_log_denominator,
)
cfg.logging._logger.log(
"internal",
"current_step",
cfg.environment._curr_step,
)
for key in output_dict:
if key.startswith("additional_log_"):
cfg.logging._logger.log(
"train",
key.replace("additional_log_", ""),
output_dict[key].item(),
step=cfg.environment._curr_step
/ cfg.environment._step_log_denominator,
)
# Show logs each 5% of the epoch (only if doing per epoch evaluation)
if (itr + 1) % log_update_steps == 0 or itr == epoch_steps - 1:
progress_bar.set_description(
f"train loss: {np.mean(losses[-10:]):.2f}", refresh=False
)
if (itr + 1) % log_update_steps == 0:
progress_bar.update(log_update_steps)
else:
progress_bar.update(epoch_steps % log_update_steps)
del output_dict
# Validation loop
if (itr + 1) % evaluation_step == 0:
# TODO: Move back after fixing slow generation of deepspeed.
if cfg.training.save_checkpoint == "last":
logger.info(
f"Saving last model checkpoint to {cfg.output_directory}"
)
save_checkpoint(model=model, path=cfg.output_directory, cfg=cfg)
elif cfg.training.save_checkpoint == "each_evaluation_epoch":
checkpoint_path = os.path.join(
cfg.output_directory, f"epoch_{epoch}_step_{itr}"
)
logger.info(f"Saving model checkpoint to {checkpoint_path}")
save_checkpoint(model=model, path=checkpoint_path, cfg=cfg)
create_symlinks_in_parent_folder(checkpoint_path)
val_loss, val_metric = run_eval(
cfg=cfg, model=model, val_dataloader=val_dataloader, val_df=val_df
)
if cfg.training.save_checkpoint == "best":
if objective_op(val_metric, best_val_metric):
logger.info(
f"Saving best model checkpoint: "
f"val_{cfg.prediction.metric} {best_val_metric:.5} -> "
f"{val_metric:.5} to {cfg.output_directory}"
)
save_checkpoint(model=model, path=cfg.output_directory, cfg=cfg)
best_val_metric = val_metric
model.train()
progress_bar.close()
del progress_bar
if cfg.environment._distributed:
torch.cuda.synchronize(device=cfg.environment._local_rank)
torch.distributed.barrier()
if cfg.environment._local_rank == 0:
cfg.logging._logger.log("internal", "epoch", epoch + 1)
if cfg.environment._distributed:
torch.distributed.barrier()
return val_loss, val_metric
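The loop above interleaves gradient accumulation, optional mixed precision, and gradient clipping; the stripped-down sketch below isolates just that pattern. model, loader, and optimizer are hypothetical, and the model is assumed to return a dict with a "loss" entry, as in the code above.

import torch
from torch.cuda.amp import GradScaler, autocast

def train_steps(model, loader, optimizer, grad_accumulation=4, gradient_clip=1.0, use_amp=True):
    scaler = GradScaler(enabled=use_amp)
    optimizer.zero_grad(set_to_none=True)
    for itr, batch in enumerate(loader):
        with autocast(enabled=use_amp):
            # Rescale so that batch_size * grad_accumulation = const behaves comparably.
            loss = model(batch)["loss"] / grad_accumulation
        scaler.scale(loss).backward()
        if itr % grad_accumulation == 0:              # same modulo used in the loop above
            if gradient_clip > 0:
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad(set_to_none=True)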
|
"""Runs the training loop.
Args:
cfg: DefaultConfigProblemBase config object
model: model
train_dataloader: custom training Dataloader
train_df: train DataFrame
val_dataloader: custom validation Dataloader
val_df: validation DataFrame
Returns:
Validation prediction output
Validation loss
Validation metric
Last train batch
"""
|
https://github.com/h2oai/h2o-llmstudio/blob/39f5709ff6ad6db08282df0648352b6a88cb749d/llm_studio/train.py#L165-L436
|
39f5709ff6ad6db08282df0648352b6a88cb749d
|
langroid
|
github_2023
|
langroid
|
python
|
create_custom_chat_agent
|
def create_custom_chat_agent(
name: str, llm_config: OpenAIGPTConfig, system_message: str
) -> ChatAgent:
"""creates a ChatAgent with the given parameters.
Args:
name (str): The name of the agent.
llm_config (OpenAIGPTConfig): The LLM configuration for the agent.
system_message (str): The system message to guide the agent's LLM.
Returns:
ChatAgent: A configured ChatAgent instance.
"""
# Modify the system message to include instructions for the agent
additional_system_message = """**Response format (strictly follow this structure):**
Pro:
- [First key point]
- [Second key point]
- [Third key point]
**Limit responses to exactly 3 points expressed as single sentences.**
"""
system_message = f"""
Start your response with '{name}: ' and then follow the instructions below.
{system_message} {additional_system_message}
"""
return ChatAgent(
ChatAgentConfig(
llm=llm_config,
name=name,
system_message=system_message,
)
)
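A hedged usage sketch of the factory above; the model name and messages are illustrative, and OpenAIGPTConfig is taken from langroid's language_models module as in typical langroid examples.

import langroid.language_models as lm

llm_config = lm.OpenAIGPTConfig(chat_model="gpt-4o")   # illustrative model name
pro_agent = create_custom_chat_agent(
    name="Pro",
    llm_config=llm_config,
    system_message="You argue in favor of the motion under debate.",
)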
|
"""creates a ChatAgent with the given parameters.
Args:
name (str): The name of the agent.
llm_config (OpenAIGPTConfig): The LLM configuration for the agent.
system_message (str): The system message to guide the agent's LLM.
Returns:
ChatAgent: A configured ChatAgent instance.
"""
|
https://github.com/langroid/langroid/blob/8eecdf99b42a0cb488522cc950e0890e3e9b72ed/examples/multi-agent-debate/main_chainlit.py#L89-L120
|
8eecdf99b42a0cb488522cc950e0890e3e9b72ed
|
langroid
|
github_2023
|
langroid
|
python
|
clear_all_collections
|
def clear_all_collections(self, really: bool = False, prefix: str = "") -> int:
"""Clear all collections with the given prefix."""
if not really:
logger.warning("Not deleting all collections, set really=True to confirm")
return 0
coll_names = self.list_collections(empty=False)
coll_names = [name for name in coll_names if name.startswith(prefix)]
if len(coll_names) == 0:
logger.warning(f"No collections found with prefix {prefix}")
return 0
for name in coll_names:
self.delete_collection(name)
logger.warning(
f"""
Deleted {len(coll_names)} indices from Momento VI
"""
)
return len(coll_names)
|
"""Clear all collections with the given prefix."""
|
https://github.com/langroid/langroid/blob/8eecdf99b42a0cb488522cc950e0890e3e9b72ed/langroid/vector_store/momento.py#L99-L117
|
8eecdf99b42a0cb488522cc950e0890e3e9b72ed
|
ChatPLUG
|
github_2023
|
X-PLUG
|
python
|
T5EncoderDecoderInitHelper.verify_onnx
|
@staticmethod
def verify_onnx(
model: T5EncoderDecoderInit,
ort_session: InferenceSession,
device: torch.device,
max_cases=4,
):
"""Compare the result from PyTorch and OnnxRuntime to verify the ONNX model is good."""
ort_inputs = ort_session.get_inputs()
use_decoder_input_ids = len(ort_inputs) == 3
test_cases = [(4, 11), (1, 2), (3, 1), (8, 5)]
test_cases_max_diff = []
for (batch_size, encode_sequence_length) in test_cases[:max_cases]:
inputs = T5EncoderDecoderInitInputs.create_dummy(
model.config,
batch_size,
encode_sequence_length,
use_decoder_input_ids=use_decoder_input_ids,
device=device,
)
ort_outputs = T5EncoderDecoderInitHelper.onnxruntime_inference(ort_session, inputs)
# Run inference of PyTorch model
input_list = inputs.to_list()
torch_outputs = model(*input_list)
assert torch_outputs[0].cpu().numpy().shape == ort_outputs['logits'].shape
max_diff = numpy.amax(numpy.abs(torch_outputs[0].cpu().numpy() - ort_outputs['logits'].cpu().numpy()))
logger.debug(f"logits max_diff={max_diff}")
max_diff_all = max_diff
assert torch_outputs[1].cpu().numpy().shape == ort_outputs['encoder_hidden_states'].shape
max_diff = numpy.amax(
numpy.abs(torch_outputs[1].cpu().numpy() - ort_outputs['encoder_hidden_states'].cpu().numpy()))
logger.debug(f"encoder_hidden_states max_diff={max_diff}")
max_diff_all = max(max_diff_all, max_diff)
for i in range(2 * model.config.num_layers):
if i % 2 == 0:
ort_outputs_i = ort_outputs[f'present_key_self_{i//2}']
else:
ort_outputs_i = ort_outputs[f'present_value_self_{i//2}']
max_diff = numpy.amax(numpy.abs(torch_outputs[2][i].cpu().numpy() - ort_outputs_i.cpu().numpy()))
logger.debug(f"self attention past state {i} max_diff={max_diff}")
for i in range(2 * model.config.num_layers):
if i % 2 == 0:
ort_outputs_i = ort_outputs[f'present_key_cross_{i//2}']
else:
ort_outputs_i = ort_outputs[f'present_value_cross_{i//2}']
max_diff = numpy.amax(
numpy.abs(torch_outputs[3][i].cpu().numpy() - ort_outputs_i.cpu().numpy())
)
logger.debug(f"cross attention past state {i} max_diff={max_diff}")
max_diff_all = max(max_diff_all, max_diff)
test_cases_max_diff.append(max_diff_all)
logger.info(
f"batch_size={batch_size} encode_sequence_length={encode_sequence_length}, max_diff={max_diff_all}"
)
return max(test_cases_max_diff)
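Each comparison above reduces to an element-wise maximum absolute difference between the PyTorch and ONNX Runtime outputs; a tiny, self-contained sketch of that check on plain numpy arrays:

import numpy as np

def max_abs_diff(a: np.ndarray, b: np.ndarray) -> float:
    # Largest element-wise absolute difference, the quantity logged as max_diff above.
    return float(np.amax(np.abs(a - b)))

assert max_abs_diff(np.array([1.0, 2.0]), np.array([1.0, 2.5])) == 0.5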
|
"""Compare the result from PyTorch and OnnxRuntime to verify the ONNX model is good."""
|
https://github.com/X-PLUG/ChatPLUG/blob/3f2b8608f59e443214a22d123faaa5930fb3b783/XDPX/xdpx/utils/thirdparty/onnx_transformers/models/t5/t5_encoder_decoder_init.py#L219-L282
|
3f2b8608f59e443214a22d123faaa5930fb3b783
|
stable-diffusion-multi-user
|
github_2023
|
wolverinn
|
python
|
forward
|
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: interpolated data
"""
x = self.interp(
x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
)
return x
|
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: interpolated data
"""
|
https://github.com/wolverinn/stable-diffusion-multi-user/blob/1d79ad90de9c75692bd8e49d57679697dbefd393/extensions/sd-webui-controlnet/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py#L226-L240
|
1d79ad90de9c75692bd8e49d57679697dbefd393
|
stable-diffusion-multi-user
|
github_2023
|
wolverinn
|
python
|
UnCLIPPipeline.__call__
|
@torch.no_grad()
def __call__(
self,
prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: int = 1,
prior_num_inference_steps: int = 25,
decoder_num_inference_steps: int = 25,
super_res_num_inference_steps: int = 7,
generator: Optional[torch.Generator] = None,
prior_latents: Optional[torch.FloatTensor] = None,
decoder_latents: Optional[torch.FloatTensor] = None,
super_res_latents: Optional[torch.FloatTensor] = None,
text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
text_attention_mask: Optional[torch.Tensor] = None,
prior_guidance_scale: float = 4.0,
decoder_guidance_scale: float = 8.0,
output_type: Optional[str] = "pil",
return_dict: bool = True,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation. This can only be left undefined if
`text_model_output` and `text_attention_mask` is passed.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
prior_num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
image at the expense of slower inference.
decoder_num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality
image at the expense of slower inference.
super_res_num_inference_steps (`int`, *optional*, defaults to 7):
The number of denoising steps for super resolution. More denoising steps usually lead to a higher
quality image at the expense of slower inference.
generator (`torch.Generator`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*):
Pre-generated noisy latents to be used as inputs for the prior.
decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*):
Pre-generated noisy latents to be used as inputs for the decoder.
super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*):
Pre-generated noisy latents to be used as inputs for the super resolution step.
prior_guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
decoder_guidance_scale (`float`, *optional*, defaults to 8.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
text_model_output (`CLIPTextModelOutput`, *optional*):
Pre-defined CLIPTextModel outputs that can be derived from the text encoder. Pre-defined text outputs
can be passed for tasks like text embedding interpolations. Make sure to also pass
`text_attention_mask` in this case. `prompt` can then be left as `None`.
text_attention_mask (`torch.Tensor`, *optional*):
Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention
masks are necessary when passing `text_model_output`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
"""
if prompt is not None:
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
else:
batch_size = text_model_output[0].shape[0]
device = self._execution_device
batch_size = batch_size * num_images_per_prompt
do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0
text_embeddings, text_encoder_hidden_states, text_mask = self._encode_prompt(
prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask
)
# prior
self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
prior_timesteps_tensor = self.prior_scheduler.timesteps
embedding_dim = self.prior.config.embedding_dim
prior_latents = self.prepare_latents(
(batch_size, embedding_dim),
text_embeddings.dtype,
device,
generator,
prior_latents,
self.prior_scheduler,
)
for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents
predicted_image_embedding = self.prior(
latent_model_input,
timestep=t,
proj_embedding=text_embeddings,
encoder_hidden_states=text_encoder_hidden_states,
attention_mask=text_mask,
).predicted_image_embedding
if do_classifier_free_guidance:
predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
predicted_image_embedding_text - predicted_image_embedding_uncond
)
if i + 1 == prior_timesteps_tensor.shape[0]:
prev_timestep = None
else:
prev_timestep = prior_timesteps_tensor[i + 1]
prior_latents = self.prior_scheduler.step(
predicted_image_embedding,
timestep=t,
sample=prior_latents,
generator=generator,
prev_timestep=prev_timestep,
).prev_sample
prior_latents = self.prior.post_process_latents(prior_latents)
image_embeddings = prior_latents
# done prior
# decoder
text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj(
image_embeddings=image_embeddings,
text_embeddings=text_embeddings,
text_encoder_hidden_states=text_encoder_hidden_states,
do_classifier_free_guidance=do_classifier_free_guidance,
)
decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1)
self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device)
decoder_timesteps_tensor = self.decoder_scheduler.timesteps
num_channels_latents = self.decoder.in_channels
height = self.decoder.sample_size
width = self.decoder.sample_size
decoder_latents = self.prepare_latents(
(batch_size, num_channels_latents, height, width),
text_encoder_hidden_states.dtype,
device,
generator,
decoder_latents,
self.decoder_scheduler,
)
for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents
noise_pred = self.decoder(
sample=latent_model_input,
timestep=t,
encoder_hidden_states=text_encoder_hidden_states,
class_labels=additive_clip_time_embeddings,
attention_mask=decoder_text_mask,
).sample
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1)
noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1)
noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
if i + 1 == decoder_timesteps_tensor.shape[0]:
prev_timestep = None
else:
prev_timestep = decoder_timesteps_tensor[i + 1]
# compute the previous noisy sample x_t -> x_t-1
decoder_latents = self.decoder_scheduler.step(
noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator
).prev_sample
decoder_latents = decoder_latents.clamp(-1, 1)
image_small = decoder_latents
# done decoder
# super res
self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device)
super_res_timesteps_tensor = self.super_res_scheduler.timesteps
channels = self.super_res_first.in_channels // 2
height = self.super_res_first.sample_size
width = self.super_res_first.sample_size
super_res_latents = self.prepare_latents(
(batch_size, channels, height, width),
image_small.dtype,
device,
generator,
super_res_latents,
self.super_res_scheduler,
)
interpolate_antialias = {}
if "antialias" in inspect.signature(F.interpolate).parameters:
interpolate_antialias["antialias"] = True
image_upscaled = F.interpolate(
image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
)
for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)):
# no classifier free guidance
if i == super_res_timesteps_tensor.shape[0] - 1:
unet = self.super_res_last
else:
unet = self.super_res_first
latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1)
noise_pred = unet(
sample=latent_model_input,
timestep=t,
).sample
if i + 1 == super_res_timesteps_tensor.shape[0]:
prev_timestep = None
else:
prev_timestep = super_res_timesteps_tensor[i + 1]
# compute the previous noisy sample x_t -> x_t-1
super_res_latents = self.super_res_scheduler.step(
noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator
).prev_sample
image = super_res_latents
# done super res
# post processing
image = image * 0.5 + 0.5
image = image.clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
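The guidance-scale parameters documented above feed the classifier-free guidance combination applied in both the prior and decoder loops; a minimal numeric sketch of that formula follows.

import torch

def apply_cfg(pred_uncond: torch.Tensor, pred_text: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # uncond + w * (text - uncond): w > 1 pushes the prediction toward the text-conditioned direction.
    return pred_uncond + guidance_scale * (pred_text - pred_uncond)

uncond = torch.tensor([0.0, 0.0])
text = torch.tensor([1.0, -1.0])
print(apply_cfg(uncond, text, 4.0))   # tensor([ 4., -4.])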
|
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation. This can only be left undefined if
`text_model_output` and `text_attention_mask` is passed.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
prior_num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
image at the expense of slower inference.
decoder_num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality
image at the expense of slower inference.
super_res_num_inference_steps (`int`, *optional*, defaults to 7):
The number of denoising steps for super resolution. More denoising steps usually lead to a higher
quality image at the expense of slower inference.
generator (`torch.Generator`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*):
Pre-generated noisy latents to be used as inputs for the prior.
decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*):
Pre-generated noisy latents to be used as inputs for the decoder.
super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*):
Pre-generated noisy latents to be used as inputs for the super resolution step.
prior_guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
decoder_guidance_scale (`float`, *optional*, defaults to 8.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
text_model_output (`CLIPTextModelOutput`, *optional*):
Pre-defined CLIPTextModel outputs that can be derived from the text encoder. Pre-defined text outputs
can be passed for tasks like text embedding interpolations. Make sure to also pass
`text_attention_mask` in this case. `prompt` can then be left as `None`.
text_attention_mask (`torch.Tensor`, *optional*):
Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention
masks are necessary when passing `text_model_output`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
"""
|
https://github.com/wolverinn/stable-diffusion-multi-user/blob/1d79ad90de9c75692bd8e49d57679697dbefd393/repositories/stable-diffusion-stability-ai/ldm/modules/karlo/diffusers_pipeline.py#L241-L512
|
1d79ad90de9c75692bd8e49d57679697dbefd393
|
stable-diffusion-multi-user
|
github_2023
|
wolverinn
|
python
|
r1_penalty
|
def r1_penalty(real_pred, real_img):
"""R1 regularization for discriminator. The core idea is to
penalize the gradient on real data alone: when the
generator distribution produces the true data distribution
and the discriminator is equal to 0 on the data manifold, the
gradient penalty ensures that the discriminator cannot create
a non-zero gradient orthogonal to the data manifold without
suffering a loss in the GAN game.
Ref:
Eq. 9 in Which training methods for GANs do actually converge.
"""
grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0]
grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
return grad_penalty
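A self-contained usage sketch of the penalty on a toy linear "discriminator"; the real images must carry requires_grad=True so autograd.grad can return a gradient with respect to them.

import torch

real_img = torch.randn(4, 3, 8, 8, requires_grad=True)
weight = torch.randn(3 * 8 * 8, 1)
real_pred = real_img.view(4, -1) @ weight       # toy stand-in for D(real_img)

penalty = r1_penalty(real_pred, real_img)       # mean squared gradient norm over the batch
print(penalty.item())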
|
"""R1 regularization for discriminator. The core idea is to
penalize the gradient on real data alone: when the
generator distribution produces the true data distribution
and the discriminator is equal to 0 on the data manifold, the
gradient penalty ensures that the discriminator cannot create
a non-zero gradient orthogonal to the data manifold without
suffering a loss in the GAN game.
Ref:
Eq. 9 in Which training methods for GANs do actually converge.
"""
|
https://github.com/wolverinn/stable-diffusion-multi-user/blob/1d79ad90de9c75692bd8e49d57679697dbefd393/sd-docker-slim/repositories/CodeFormer/basicsr/losses/losses.py#L390-L404
|
1d79ad90de9c75692bd8e49d57679697dbefd393
|
nos
|
github_2023
|
autonomi-ai
|
python
|
ModelSpec.name
|
@property
def name(self) -> str:
"""Return the model name (for backwards compatibility)."""
return self.id
|
"""Return the model name (for backwards compatibility)."""
|
https://github.com/autonomi-ai/nos/blob/2761f7b50fa3173c74ec63a3321527fbb980b9ac/nos/common/spec.py#L449-L452
|
2761f7b50fa3173c74ec63a3321527fbb980b9ac
|
Ask-Anything
|
github_2023
|
OpenGVLab
|
python
|
overlay_instances
|
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5,
):
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* :class:`detectron2.structures.PolygonMasks`,
:class:`detectron2.structures.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level to all the polygon that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = 0
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
# skip small mask without polygon
if len(masks[i].polygons) == 0:
continue
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
|
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* :class:`detectron2.structures.PolygonMasks`,
:class:`detectron2.structures.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level to all the polygon that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
|
https://github.com/OpenGVLab/Ask-Anything/blob/c7f879b10533ba7d030c04ac559374663e35e3a4/video_chat_text/video_chat_with_ChatGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py#L607-L747
|
c7f879b10533ba7d030c04ac559374663e35e3a4
|
Ask-Anything
|
github_2023
|
OpenGVLab
|
python
|
predict_proposals
|
def predict_proposals(
self,
anchors: List[Boxes],
pred_objectness_logits: List[torch.Tensor],
pred_anchor_deltas: List[torch.Tensor],
image_sizes: List[Tuple[int, int]],
):
"""
Decode all the predicted box regression deltas to proposals. Find the top proposals
by applying NMS and removing boxes that are too small.
Returns:
proposals (list[Instances]): list of N Instances. The i-th Instances
stores post_nms_topk object proposals for image i, sorted by their
objectness score in descending order.
"""
# The proposals are treated as fixed for joint training with roi heads.
# This approach ignores the derivative w.r.t. the proposal boxes’ coordinates that
# are also network responses.
with torch.no_grad():
pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
return find_top_rpn_proposals(
pred_proposals,
pred_objectness_logits,
image_sizes,
self.nms_thresh,
self.pre_nms_topk[self.training],
self.post_nms_topk[self.training],
self.min_box_size,
self.training,
)
|
"""
Decode all the predicted box regression deltas to proposals. Find the top proposals
by applying NMS and removing boxes that are too small.
Returns:
proposals (list[Instances]): list of N Instances. The i-th Instances
stores post_nms_topk object proposals for image i, sorted by their
objectness score in descending order.
"""
|
https://github.com/OpenGVLab/Ask-Anything/blob/c7f879b10533ba7d030c04ac559374663e35e3a4/video_chat_text/video_chat_with_StableLM/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/rpn.py#L482-L512
|
c7f879b10533ba7d030c04ac559374663e35e3a4
|
Ask-Anything
|
github_2023
|
OpenGVLab
|
python
|
__init__
|
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
|
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
|
https://github.com/OpenGVLab/Ask-Anything/blob/c7f879b10533ba7d030c04ac559374663e35e3a4/video_chat_text/video_chat_with_StableLM/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py#L23-L31
|
c7f879b10533ba7d030c04ac559374663e35e3a4
|
Ask-Anything
|
github_2023
|
OpenGVLab
|
python
|
swish
|
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
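A quick numeric check of the definition swish(x) = x * sigmoid(x); the in-place path should only be used when x can safely be overwritten.

import torch

x = torch.tensor([-2.0, 0.0, 2.0])
print(swish(x))                                        # tensor([-0.2384,  0.0000,  1.7616]) approximately
print(torch.allclose(swish(x), x * torch.sigmoid(x)))  # True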
|
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
|
https://github.com/OpenGVLab/Ask-Anything/blob/c7f879b10533ba7d030c04ac559374663e35e3a4/video_chat_text/video_chat_with_StableLM/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/bifpn.py#L40-L43
|
c7f879b10533ba7d030c04ac559374663e35e3a4
|
Ask-Anything
|
github_2023
|
OpenGVLab
|
python
|
voc_eval
|
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# first load gt
# read list of images
with PathManager.open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for imagename in imagenames:
recs[imagename] = parse_rec(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
difficult = np.array([x["difficult"] for x in R]).astype(bool)  # np.bool is removed in modern NumPy
# difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["difficult"][jmax]:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
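When use_07_metric is True, the AP comes from the standard VOC07 11-point interpolation; the sketch below implements that textbook formula and may differ in details from the repo's own voc_ap helper.

import numpy as np

def eleven_point_ap(rec: np.ndarray, prec: np.ndarray) -> float:
    # Average the best precision achievable at recall >= t for t = 0.0, 0.1, ..., 1.0.
    ap = 0.0
    for t in np.arange(0.0, 1.1, 0.1):
        p = float(np.max(prec[rec >= t])) if np.any(rec >= t) else 0.0
        ap += p / 11.0
    return ap

rec = np.array([0.5, 1.0])
prec = np.array([1.0, 0.5])
print(eleven_point_ap(rec, prec))   # (6 * 1.0 + 5 * 0.5) / 11, roughly 0.7727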
|
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
|
https://github.com/OpenGVLab/Ask-Anything/blob/c7f879b10533ba7d030c04ac559374663e35e3a4/video_chat_with_MOSS/models/grit_src/third_party/CenterNet2/detectron2/evaluation/pascal_voc_evaluation.py#L187-L300
|
c7f879b10533ba7d030c04ac559374663e35e3a4
|
Ask-Anything
|
github_2023
|
OpenGVLab
|
python
|
apply_image
|
def apply_image(self, img, interp=None):
"""
img should be a numpy array, formatted as Height * Width * Nchannels
"""
if len(img) == 0 or self.angle % 360 == 0:
return img
assert img.shape[:2] == (self.h, self.w)
interp = interp if interp is not None else self.interp
return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
|
"""
img should be a numpy array, formatted as Height * Width * Nchannels
"""
|
https://github.com/OpenGVLab/Ask-Anything/blob/c7f879b10533ba7d030c04ac559374663e35e3a4/video_chat_with_StableLM/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/transform.py#L200-L208
|
c7f879b10533ba7d030c04ac559374663e35e3a4
|