| prompt (string, lengths 1.74k-34.3k) | ref (string, lengths 4-432) |
|---|---|
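Each row below pairs a long code-completion prompt with the single reference next line (`ref`). As a minimal sketch of how such rows could be consumed, the snippet below assumes the table has been exported to a JSONL file with `prompt` and `ref` fields; the file path and the `predict_next_line` callable are hypothetical placeholders, not part of the dataset.

```python
import json

def normalize(line: str) -> str:
    # Compare lines ignoring surrounding whitespace, since indentation may differ.
    return line.strip()

def evaluate(path: str, predict_next_line) -> float:
    """Exact-match accuracy of predicted next lines against the ref column."""
    total = 0
    correct = 0
    with open(path, encoding="utf-8") as f:
        for raw in f:
            row = json.loads(raw)
            prediction = predict_next_line(row["prompt"])
            correct += int(normalize(prediction) == normalize(row["ref"]))
            total += 1
    return correct / max(total, 1)
```

For instance, `evaluate("rows.jsonl", my_model)` would return the fraction of rows where the model reproduces the reference line exactly.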
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Harvard-Ophthalmology-AI-Lab/FairSeg
# Path: SAMed/segment_anything/modeling/common.py
class LayerNorm2d(nn.Module):
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
# Path: SAMed/segment_anything/modeling/common.py
class MLPBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
mlp_dim: int,
act: Type[nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
# Path: SAMed/segment_anything/modeling/image_encoder.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from icecream import ic
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
| LayerNorm2d(out_chans), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: anand2312/quill-server
# Path: quill_server/db/models.py
class User(Base):
__tablename__ = "user"
id: Mapped[UUID] = mapped_column(pg_UUID(as_uuid=True), primary_key=True, default=uuid4) # noqa: A003
username: Mapped[str] = mapped_column(unique=True)
password: Mapped[str]
created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
def __repr__(self) -> str:
return f"<User(id={self.id} username={self.username})>"
# Path: quill_server/realtime/room.py
class GameMember(BaseModel):
"""Represents a user currently playing in a Quill room."""
user_id: str
username: str
# Path: quill_server/realtime/room.py
class Room(BaseModel):
"""Represents a Quill game room."""
room_id: str
owner: GameMember
users: list[GameMember]
status: GameStatus
@classmethod
def new(cls: type["Room"], owner: User) -> "Room":
return cls(
room_id=str(uuid4()),
owner=_db_user_to_game_member(owner),
users=[],
status=GameStatus.LOBBY,
)
async def start(self) -> None:
"""Start the game in this room."""
self.status = GameStatus.ONGOING
logger.info(f"Setting room:{self.room_id}:status = ONGOING")
await cache.client.set(f"room:{self.room_id}:status", str(self.status))
async def end(self) -> None:
"""End the game in this room."""
self.status = GameStatus.ENDED
logger.info(f"Setting room:{self.room_id}:status = ENDED")
await cache.client.set(f"room:{self.room_id}:status", str(self.status))
async def join(self, user: User) -> None:
"""Add a user to this room."""
# reject connection if the user is already in the room...
if any([u.user_id == str(user.id) for u in self.users]):
raise ValueError("User is already in this room")
# or if the game isn't in the lobby state anymore...
elif self.status != GameStatus.LOBBY:
raise ValueError("Room is no longer accepting members")
# or if the room already has 8 members
elif len(self.users) == 8:
raise ValueError("Maximum room capacity reached")
data = _db_user_to_game_member(user)
self.users.append(data)
logger.info(f"Adding {data.username} to room:{self.room_id}")
await typing.cast(
typing.Awaitable[int],
cache.client.rpush(f"room:{self.room_id}:users", data.model_dump_json()),
)
async def leave(self, user: User) -> None:
"""Remove a user from this room."""
data = _db_user_to_game_member(user)
self.users.remove(data)
logger.info(f"Removing {data.username} from room:{self.room_id}")
res = await typing.cast(
typing.Awaitable[int],
cache.client.lrem(f"room:{self.room_id}:users", 1, data.model_dump_json()),
)
if res != 1:
logger.warning(
f"Attempted removing {data.username} from room:{self.room_id} "
f"but Redis gave a response != 1 ({res=})"
)
async def to_redis(self) -> None:
"""Writes the room to Redis."""
# all the dictionaries are being dumped to redis as JSON strings
# room:id:users will be a list of JSON strings
key = f"room:{self.room_id}"
owner = self.owner.model_dump_json()
users = [i.model_dump_json() for i in self.users]
status = str(self.status)
logger.info(f"Writing {key} to Redis")
async with cache.client.pipeline(transaction=True) as pipe:
pipe.set(f"{key}:owner", owner)
pipe.set(f"{key}:status", str(status))
if len(users) > 0:
pipe.rpush(f"{key}:users", *users)
await pipe.execute()
logger.info(f"Saved {key} to Redis")
@classmethod
async def from_redis(cls: type["Room"], room_id: str) -> typing.Optional["Room"]:
key = f"room:{room_id}"
logger.info(f"Fetching {key} from Redis")
status = await cache.client.get(f"{key}:status")
if not status:
logger.warning(f"{key} does not exist in cache")
return
owner_res = await cache.client.get(f"{key}:owner")
owner = loads(owner_res)
# redis-py has incorrect return types set, so we need to cast here
# https://github.com/redis/redis-py/issues/2933
users_res = await typing.cast(
typing.Awaitable[list[bytes]], cache.client.lrange(f"{key}:users", 0, -1)
)
users = [loads(i) for i in users_res]
return cls(room_id=room_id, owner=owner, users=users, status=status.decode())
# Path: quill_server/realtime/room.py
class ChatMessage(BaseModel):
"""Represents a message sent by a Quill player."""
username: str
message: str
has_guessed: bool
# Path: quill_server/realtime/room.py
def _db_user_to_game_member(user: User) -> GameMember:
return GameMember(user_id=str(user.id), username=user.username)
# Path: quill_server/schema.py
class MessageResponse(BaseModel):
message: str
# Path: quill_server/realtime/events.py
from enum import StrEnum, auto
from functools import partial
from typing import Any, Generic, TypeVar
from collections.abc import Awaitable
from loguru import logger
from pydantic import BaseModel
from redis.asyncio import Redis
from quill_server.db.models import User
from quill_server.realtime.room import GameMember, Room, ChatMessage, _db_user_to_game_member
from quill_server.schema import MessageResponse
import typing
DataT = TypeVar("DataT", bound=BaseModel)
# the excalidraw element event contains many fields
# https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
ExcalidrawElement = dict[str, Any]
class Drawing(BaseModel):
| user: GameMember |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OPTML-Group/DeepZero
# Path: algorithm/prune/importance_scores.py
def zoo_grasp_importance_score(
model,
dataloader,
samples_per_class,
class_num,
zoo_rs_size,
zoo_step_size,
loss_func = torch.nn.CrossEntropyLoss()
):
score_dict = {}
device = next(model.parameters()).device
x, y = fetch_data(dataloader, class_num, samples_per_class)
x, y = x.to(device), y.to(device)
params = extract_conv2d_and_linear_weights(model)
f_theta = partial(f, network=model, x=x, y=y, loss_func=loss_func)
g0 = rge(f_theta, params, zoo_rs_size, zoo_step_size)
modified_params = {}
for key, param in params.items():
modified_params[key] = param.data + g0[key].data * zoo_step_size
g1 = rge(f_theta, modified_params, zoo_rs_size, zoo_step_size)
Hg = {}
for key, param in params.items():
Hg[key] = (g1[key].data - g0[key].data) / zoo_step_size
for name, m in model.named_modules():
if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
if hasattr(m, "weight_orig"):
score_dict[(m, 'weight')] = -m.weight_orig.clone().detach() * Hg[f'{name}.weight_orig']
else:
score_dict[(m, 'weight')] = -m.weight.clone().detach() * Hg[f'{name}.weight']
return score_dict
# Path: algorithm/prune/importance_scores.py
def grasp_importance_score(
model,
dataloader,
samples_per_class,
class_num,
loss_func = torch.nn.CrossEntropyLoss()
):
temperature = 200
score_dict = {}
model.zero_grad()
device = next(model.parameters()).device
x, y = fetch_data(dataloader, class_num, samples_per_class)
x, y = x.to(device), y.to(device)
loss = loss_func(model(x) / temperature, y)
gs = grad(loss, model.parameters(), create_graph=True)
model.zero_grad()
t = sum([(g*g.data).sum() for g in gs])
t.backward()
for m in model.modules():
if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
if hasattr(m, "weight_orig"):
score_dict[(m, 'weight')] = (m.weight_orig.grad.clone().detach() * m.weight.clone().detach()).abs()
else:
score_dict[(m, 'weight')] = (m.weight.grad.clone().detach() * m.weight.clone().detach()).abs()
model.zero_grad()
for g in gs:
del g.grad
return score_dict
# Path: algorithm/prune/importance_scores.py
def random_importance_score(
model
):
score_dict = {}
for m in model.modules():
if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
score_dict[(m, 'weight')] = torch.randn_like(m.weight)
return score_dict
# Path: algorithm/prune/main.py
import torch
from torch.nn.utils import prune
from copy import deepcopy
from .importance_scores import zoo_grasp_importance_score, grasp_importance_score, random_importance_score
__all__ = ['global_prune', 'check_sparsity', 'check_grad_sparsity', 'custom_prune', 'extract_mask', 'remove_prune', 'layer_sparsity']
def global_prune(model, ratio, method, class_num=None, dataloader=None, sample_per_classes=25, zoo_sample_size=None, zoo_step_size=None, layer_wise_sparsity=None):
if method == 'grasp':
score_dict = grasp_importance_score(model, dataloader, sample_per_classes, class_num)
prune.global_unstructured(
parameters=score_dict.keys(),
pruning_method=prune.L1Unstructured,
amount=ratio,
importance_scores=score_dict,
)
elif method == 'zo_grasp':
| score_dict = zoo_grasp_importance_score(model, dataloader, sample_per_classes, class_num, zoo_sample_size, zoo_step_size) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: S3raphimCS/Hackathon_telehack
# Path: backend/SPO_KROT/metrics/models.py
class ExcelFile(models.Model):
file = models.FileField(
upload_to='metrics',
unique=True,
blank=True, null=True,
validators=[FileExtensionValidator(['xlsx', 'xls', 'xlsm'])],
)
@property
def filename(self):
return self.file.name.split('/')[-1:][0]
# Path: backend/SPO_KROT/metrics/models.py
class Measurements(models.Model):
"""Модель записи измерений из отчета, которые будут изменяться."""
class Meta:
verbose_name = "Измерение"
verbose_name_plural = "Измерения"
operator = models.ForeignKey(
Operator,
on_delete=models.CASCADE,
)
report = models.ForeignKey(
Report,
on_delete=models.CASCADE,
)
voice_service_non_accessibility = models.FloatField(
_("Доля неуспешных попыток установления голосового соединения"),
validators=PERCENTAGE_VALIDATOR
)
voice_service_cut_off = models.FloatField(
_("Доля обрывов голосовых соединений"),
validators=PERCENTAGE_VALIDATOR
)
speech_quality_on_call = models.FloatField(
_("Средняя разборчивость речи на соединение"),
)
negative_mos_samples_ratio = models.FloatField(
_("Доля голосовых соединений с низкой разборчивостью речи"),
validators=PERCENTAGE_VALIDATOR
)
undelivered_messages = models.FloatField(
_("Доля недоставленных SMS сообщений"),
validators=PERCENTAGE_VALIDATOR
)
avg_sms_delivery_time = models.FloatField(
_("Среднее время доставки SMS сообщений"),
)
http_failure_session = models.FloatField(
_("Доля неуспешных сессий по протоколу HTTP"),
validators=PERCENTAGE_VALIDATOR
)
http_ul_mean_userdata_rate = models.FloatField(
_("Среднее значение скорости передачи данных от абонента"),
)
http_dl_mean_userdata_rate = models.FloatField(
_("Среднее значение скорости передачи данных к абоненту"),
)
http_session_time = models.FloatField(
_("Продолжительность успешной сессии"),
)
number_of_test_voice_connections = models.IntegerField(
_("Общее количество тестовых голосовых соединений "),
)
number_of_voice_sequences = models.IntegerField(
_("Общее количество голосовых последовательностей в оцениваемых соединениях"),
)
voice_connections_with_low_intelligibility = models.IntegerField(
_("Количество голосовых соединений с низкой разборчивостью"),
)
number_of_sms_messages = models.IntegerField(
_("Общее количество отправленных SMS - сообщений"),
)
number_of_connections_attempts_http = models.IntegerField(
_("Общее количество попыток соединений с сервером передачи данных HTTP"),
)
number_of_test_sessions_http = models.IntegerField(
_("Общее количество тестовых сессий по протоколу HTTP"),
)
def __str__(self):
return f"Метрика {self.operator} из отчета {self.report}"
# Path: backend/SPO_KROT/metrics/models.py
class Operator(models.Model):
"""Модель операторов связи для возможности добавления новых."""
class Meta:
verbose_name = "Оператор"
verbose_name_plural = "Операторы"
name = models.CharField(
_("Название оператора"),
max_length=50,
blank=False, null=False,
unique=True,
)
def __str__(self) -> models.CharField:
return self.name
# Path: backend/SPO_KROT/metrics/models.py
class Report(models.Model):
"""Модель отчетов для потенциального хранения информации об отчетах в БД."""
class Meta:
verbose_name = "Отчет"
verbose_name_plural = "Отчеты"
title = models.CharField(
_("Название отчета"),
max_length=200,
blank=False, null=False,
)
region = models.CharField(
_("Регион"),
max_length=50,
blank=True, null=True,
)
city = models.CharField(
_("Город"),
max_length=100,
blank=True, null=True
)
start_date = models.DateField(
_("Дата начала измерений"),
blank=True, null=True,
)
end_date = models.DateField(
_("Дата конца измерений"),
blank=True, null=True,
)
publisher = models.ForeignKey(
get_user_model(),
on_delete=models.SET_NULL,
null=True,
)
published = models.DateTimeField(
auto_now_add=True,
)
def __str__(self):
return f"с {self.start_date} по {self.end_date} Отчет: {self.title}"
# Path: backend/SPO_KROT/metrics/admin.py
from django.contrib import admin
from .models import ExcelFile, Measurements, Operator, Report
@admin.register(Operator)
class OperatorAdmin(admin.ModelAdmin):
list_display = ('name',)
list_per_page = 15
search_fields = ("name",)
readonly_fields = ('id',)
| @admin.register(Report) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lz1oceani/LLM-As-Hierarchical-Policy
# Path: hlm/utils/math_answer_utils.py
def normalize_answer(text, answer_type="text"):
ret = normalize_answer_core(text, answer_type)
try:
str(ret)
except:
ret = None
return "No answer!" if ret is None else ret
# Path: hlm/utils/math_answer_utils.py
def is_set(item):
type_str = str(type(item)).lower()
return "sympy" in type_str and "set" in type_str
# Path: hlm/utils/math_answer_utils.py
def is_sympy(item):
return "sympy" in str(type(item)).lower()
# Path: hlm/utils/math_answer_utils.py
def is_constant(item):
if isinstance(item, Number):
return True
elif hasattr(item, "is_constant") and item.is_constant():
return True
else:
return False
# Path: hlm/utils/math_answer_utils.py
def to_set(point): # (x, y) can be a point or a open interval
if is_point(point, dim=2):
return Interval.open(point[0], point[1])
elif isinstance(point, Number):
return FiniteSet(point)
elif isinstance(point, (list, tuple)):
return FiniteSet(*point)
else:
return point
# Path: hlm/utils/math_answer_utils.py
def is_relation(item):
type_str = str(type(item)).lower()
return "sympy" in type_str and "relation" in type_str
# Path: hlm/utils/metric_utils.py
import os, warnings
import numpy as np, re, time, signal, sympy, scipy
from sympy.utilities.exceptions import SymPyDeprecationWarning
from collections import defaultdict
from numbers import Number
from IPython import embed
from copy import deepcopy
from itertools import chain
from sympy.parsing.latex import parse_latex
from sympy.core.expr import Expr
from sympy import Interval, conjugate, Abs
from .math_answer_utils import normalize_answer, is_set, is_sympy, is_constant, to_set, is_relation
from math import *
from .misc import timeout_call
os.environ["USE_SYMENGINE"] = "1"
warnings.simplefilter("ignore", SyntaxWarning)
warnings.simplefilter("ignore", RuntimeWarning)
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
# from sympy import Symbol, Eq, simplify, solve
NO_ANSWER = "No answer!"
SKIP_ANSWER_TEMPLATE = [
"Code cannot be executed!",
"Code contains infinite loop!",
"no possible values",
NO_ANSWER,
]
SKIP_ANSWER_TEMPLATE = SKIP_ANSWER_TEMPLATE + [_.lower() for _ in SKIP_ANSWER_TEMPLATE]
ZERO_ANSWER_TEMPLATE = [
"doesn't have any money left",
"used up all of",
]
def check_basics(source, target):
if not (isinstance(source, (Expr, Number)) and isinstance(target, (Expr, Number))):
return True
source_symbols = source.free_symbols if isinstance(source, Expr) else {}
target_symbols = target.free_symbols if isinstance(target, Expr) else {}
if source_symbols != target_symbols:
return False
try:
if len(source_symbols) > 0:
values = {_: np.random.rand() for _ in source_symbols}
source = source.subs(values)
target = target.subs(values)
else:
source = source.evalf()
target = target.evalf()
if not isinstance(source, Number) or not isinstance(target, Number):
source = abs(source).simplify() if not isinstance(source, Number) else source
target = abs(target).simplify() if not isinstance(target, Number) else target
return bool(np.abs(source - target) < 1e-6)
except:
pass
return True
def run_sympy_compare(source, target):
def has_fn(x):
for name in ["equals", "compare", "intersect"]:
if hasattr(x, name):
return True
return False
# print(is_constant(source), is_constant(target))
# return False
if is_constant(source) and is_constant(target):
source = source if isinstance(source, Number) else source.evalf()
target = target if isinstance(target, Number) else target.evalf()
try:
return bool(np.abs(source - target) < 1e-6)
except:
return False
if is_set(source) or is_set(target):
source = to_set(source)
target = to_set(target)
if not has_fn(source):
source, target = target, source
assert has_fn(source), [source, target, type(source), type(target)]
try:
if hasattr(source, "equals"): # Work for expressions and points
if is_relation(source) != is_relation(target):
return False
if not is_relation(source) and not check_basics(source, target):
return False
ret = source.equals(target)
ret = False if ret is None else bool(ret)
elif hasattr(source, "intersect"):
sign1 = source.intersect(target.complement(sympy.S.Reals)).simplify()
sign1 = sign1.is_empty or (np.abs(sign1.measure) < 1e-6 and sign1.is_open)
sign2 = target.intersect(source.complement(sympy.S.Reals)).simplify()
sign2 = sign2.is_empty or (np.abs(sign2.measure) < 1e-6 and sign2.is_open)
ret = sign1 and sign2
elif hasattr(source, "compare"):
ret = source.compare(target) == 0
except:
ret = False
return bool(ret)
def compare_items(source, target, answer_type="text", need_normalize=True):
if isinstance(source, (list, tuple)):
return [compare_items(_, target, answer_type=answer_type, need_normalize=need_normalize) for _ in source]
if source == "No answer!" or target == "No answer!" or source is None or target is None:
return False
if answer_type in ["text", "date", "bool"]:
return source.lower() == target.lower()
if isinstance(source, str) and isinstance(target, str):
if "=" in source and "=" not in target:
source = source.split("=")[-1]
if "=" in target and "=" not in source:
target = target.split("=")[-1]
if need_normalize:
| source = normalize_answer(source, answer_type) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mitre/arlin
# Path: arlin/dataset/collectors/sb3_collectors.py
class SB3DQNDataCollector(BaseDataCollector):
"""Data collector for a model trained with DQN in stable-baselines3."""
def __init__(self, datapoint_cls: Type[BaseDatapoint], policy: BasePolicy):
super().__init__(datapoint_cls=datapoint_cls)
self.policy = policy
def collect_internal_data(
self, observation: np.ndarray
) -> Tuple[type[BaseDatapoint], int]:
with th.no_grad():
obs = th.Tensor(np.expand_dims(observation, 0))
features = self.policy.extract_features(
obs, self.policy.q_net.features_extractor
)
latent_q = self.policy.q_net.q_net[:-1](features)
q_vals = self.policy.q_net.q_net[-1](latent_q)
action = q_vals.argmax(dim=1).reshape(-1).item()
datapoint = self.datapoint_cls(
q_vals=th.squeeze(q_vals).numpy(),
latent_qs=th.squeeze(latent_q).numpy(),
features=th.squeeze(features).numpy(),
)
return datapoint, action
# Path: arlin/dataset/collectors/sb3_collectors.py
class SB3PPODataCollector(BaseDataCollector):
"""Data collector for a model trained with PPO in stable-baselines3."""
def __init__(self, datapoint_cls: Type[BaseDatapoint], policy: BasePolicy):
super().__init__(datapoint_cls=datapoint_cls)
self.policy = policy
def collect_internal_data(
self, observation: np.ndarray
) -> Tuple[type[BaseDatapoint], int]:
with th.no_grad():
obs = th.Tensor(np.expand_dims(observation, 0))
policy_dist = self.policy.get_distribution(obs)
action = policy_dist.get_actions(deterministic=True).item()
probs = policy_dist.distribution.probs
value = self.policy.predict_values(obs)
features = self.policy.extract_features(obs)
if self.policy.share_features_extractor:
latent_pi, latent_vf = self.policy.mlp_extractor(features)
pi_features = features
vf_features = features
else:
pi_features, vf_features = features
latent_pi = self.policy.mlp_extractor.forward_actor(pi_features)
latent_vf = self.policy.mlp_extractor.forward_critic(vf_features)
datapoint = self.datapoint_cls(
latent_actors=th.squeeze(latent_pi).numpy(),
latent_critics=th.squeeze(latent_vf).numpy(),
dist_probs=th.squeeze(probs).numpy(),
critic_values=th.squeeze(value).item(),
pi_features=th.squeeze(pi_features).numpy(),
vf_features=th.squeeze(vf_features).numpy(),
)
return datapoint, action
# Path: arlin/dataset/collectors/datapoints.py
class SB3DQNDatapoint(BaseDatapoint):
"""Datapoint for a DQN algorithm trained in stable-baselines3."""
q_vals: Optional[np.ndarray] = None
latent_qs: Optional[np.ndarray] = None
features: Optional[np.ndarray] = None
# Path: arlin/dataset/collectors/datapoints.py
class SB3PPODatapoint(BaseDatapoint):
"""Datapoint for a PPO algorithm trained in stable-baselines3."""
latent_actors: Optional[np.ndarray] = None
latent_critics: Optional[np.ndarray] = None
dist_probs: Optional[np.ndarray] = None
critic_values: Optional[float] = None
pi_features: Optional[np.ndarray] = None
vf_features: Optional[np.ndarray] = None
# Path: tests/test_dataset/test_collectors/test_sb3_collectors.py
import pytest
from stable_baselines3 import DQN
from arlin.dataset.collectors import SB3DQNDataCollector, SB3PPODataCollector
from arlin.dataset.collectors.datapoints import SB3DQNDatapoint, SB3PPODatapoint
@pytest.fixture
def dqn_model(env):
model = DQN("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=int(100))
return model
class TestSB3Collectors:
def test_sb3_ppo_collector(self, ppo_model, env):
| collector = SB3PPODataCollector(SB3PPODatapoint, ppo_model.policy) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Giftify-Bot/Giftify-Bot
# Path: utils/constants.py
ARROW_BACK_EMOJI = "<:GiftifyBack:1120372002939744308>"
# Path: utils/constants.py
ARROW_EMOJI = "<:GiftifyArrow:1117849870678638653>"
# Path: utils/constants.py
STOP_EMOJI = "<:GiftifyStop:1120372964811079771>"
# Path: utils/tree.py
class CommandTree(app_commands.CommandTree):
async def on_error(
self,
interaction: Interaction,
error: app_commands.AppCommandError,
) -> None:
# Path: utils/paginator.py
import abc
import discord
from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
from discord.ext import commands
from typing import TypeAlias
from typing_extensions import TypeAlias
from utils.constants import ARROW_BACK_EMOJI, ARROW_EMOJI, STOP_EMOJI
from utils.tree import Interaction
from bot import Giftify
@property
def max_page(self) -> int:
"""The max page count for this paginator."""
return len(self.pages)
@property
def min_page(self) -> int:
"""The min page count for this paginator."""
return 1
@property
def current_page(self) -> int:
"""The current page the user is on."""
return self._current_page_index + 1
@property
def total_pages(self) -> int:
"""Returns the total amount of pages."""
return len(self.pages)
@abc.abstractmethod
def format_page(self, entries: List[T], /) -> discord.Embed:
"""
Used to make the embed that the user sees. This can be a coroutine or a regular
function. This must be overwritten by the subclass.
Parameters
----------
entries: List[Any]
A list of entries for the current page.
Returns
-------
discord.Embed
The embed for this page.
"""
raise NotImplementedError("Subclass did not overwrite format_page coro.")
async def embed(self) -> discord.Embed:
"""
A helper function to get the embed for the current page.
Returns
-------
discord.Embed
The embed for the current page.
"""
return await discord.utils.maybe_coroutine(
self.format_page, self.pages[self._current_page_index]
)
async def interaction_check(self, interaction: Interaction, /) -> Optional[bool]:
"""
The base interaction check for the given view.
This will always return ``True`` if the target is ``None``, otherwise it will check
that the user invoking the paginator is the same user that is interacting with the
paginator.
Parameters
----------
interaction: discord.Interaction
The interaction to check.
Returns
-------
Optional[bool]
The result of the interaction check. If this returns ``None`` then the interaction
was responded to with an error message to the user.
"""
if self.target is None:
return True
assert self.author
# Ensure this is the correct invoker
if self.author.id != interaction.user.id:
return await interaction.response.send_message(
"Hey, this isn't yours!", ephemeral=True
)
# Ensure they invoke it in the correct channel.
if (
self.target.channel
and interaction.channel
and self.target.channel.id != interaction.channel.id
):
return await interaction.response.send_message(
"Hey, this isn't in the right channel!", ephemeral=True
)
return True
def _switch_page(self, count: int, /) -> None:
self._current_page_index += count
if self.clamp_pages:
if count < 0: # Going down
if self._current_page_index < 0:
self._current_page_index = self.max_page - 1
elif count > 0: # Going up
if self._current_page_index > self.max_page - 1: # - 1 for indexing
self._current_page_index = 0
return
@discord.ui.button(emoji=ARROW_BACK_EMOJI)
async def on_arrow_backward(
self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]
) -> discord.InteractionMessage:
"""
The button to represent going backwards a page.
Parameters
----------
interaction: discord.Interaction
The interaction created from the user invoking the button.
button: discord.ui.Button
The button that was pressed.
"""
await interaction.response.defer()
self._switch_page(-1)
embed = await self.embed()
return await interaction.edit_original_response(embed=embed)
| @discord.ui.button(emoji=STOP_EMOJI) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Zjy0401/CoCoFormer
# Path: utilities/device.py
def get_device():
if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):
return TORCH_CPU_DEVICE
else:
return TORCH_CUDA_DEVICE
# Path: utilities/argument_funcs.py
def parse_train_args():
parser = argparse.ArgumentParser()
parser.add_argument("-input_dir", type=str, default="./dataset/dataset/JSF_SATB", help="Folder of preprocessed and pickled midi files")
parser.add_argument("-output_dir", type=str, default="./baseline_3loss", help="Folder to save model weights. Saves one every epoch")
parser.add_argument("-weight_modulus", type=int, default=1, help="How often to save epoch weights (ex: value of 10 means save every 10 epochs)")
parser.add_argument("-print_modulus", type=int, default=1, help="How often to print train results for a batch (batch loss, learn rate, etc.)")
parser.add_argument("-word2event", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')
parser.add_argument("-n_workers", type=int, default=2, help="Number of threads for the dataloader")
parser.add_argument("--force_cpu", action="store_true", help="Forces model to run on a cpu even when gpu is available")
parser.add_argument("--gpu", default=[2], nargs='+', type=int, help="For Multi-GPUs training")
parser.add_argument("--no_tensorboard", action="store_true", help="Turns off tensorboard result reporting")
parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')
parser.add_argument("--scheduled_sampling_change_ratio", default=0.5, type=int, help='ratio about mix golden target with output')
parser.add_argument("-continue_weights", type=str, default=None, help="Model weights to continue training based on")
parser.add_argument("-continue_epoch", type=int, default=None, help="Epoch the continue_weights model was at")
parser.add_argument("-lr", type=float, default=None, help="Constant learn rate. Leave as None for a custom scheduler.")
parser.add_argument("-ce_smoothing", type=float, default=None, help="Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)")
parser.add_argument("-batch_size", type=int, default=2, help="Batch size per gpu to use")
parser.add_argument("-epochs", type=int, default=300, help="Number of epochs to use")
parser.add_argument("-adv_train", default=True, help='add discriminator loss')
parser.add_argument("-only_Transformer", default=False, help='use pure Transformer, default set to false, True only for test')
parser.add_argument("-loss", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')
parser.add_argument("--rpr", action="store_true", help="Use a modified Transformer for Relative Position Representations")
parser.add_argument("-max_sequence", type=int, default=2048, help="Maximum midi sequence to consider")
parser.add_argument("-n_layers", type=int, default=6, help="Number of decoder layers to use")
parser.add_argument("-num_heads", type=int, default=8, help="Number of heads to use for multi-head attention")
parser.add_argument("-d_model", type=int, default=512, help="Dimension of the model (output dim of embedding layers, etc.)")
parser.add_argument("-dim_feedforward", type=int, default=1024, help="Dimension of the feedforward layer")
parser.add_argument("-dropout", type=float, default=0.1, help="Dropout rate")
parser.add_argument("--metrics", default=False, help="evaluate TER(token error rate)")
return parser.parse_args()
# Path: model/rpr.py
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.nn import Module
from torch.nn.modules.transformer import _get_clones
from torch.nn.modules.linear import Linear
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.normalization import LayerNorm
from torch.nn.init import *
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.functional import linear, softmax, dropout
from utilities.device import get_device
from utilities.argument_funcs import parse_train_args
# TransformerEncoderRPR
class TransformerEncoderRPR(Module):
def __init__(self, encoder_layer, num_layers, encoder_past, max_seq, c_max_seq, b_max_seq, norm=None):
super(TransformerEncoderRPR, self).__init__()
self.past_layers = _get_clones(encoder_past, 1)
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.max_seq = max_seq
self.c_max_seq = c_max_seq
self.b_max_seq = b_max_seq
def forward(self, src, mask=None, src_key_padding_mask=None):
| args = parse_train_args() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: a16z-infra/sunlight
# Path: model/diffbot.py
class DiffbotClient(object):
BASE_API_URL = 'http://api.diffbot.com'
TIMEOUT_MS = 15000
def request(self, url, token, api, version=3):
''' Issue a request to the Diffbot API and return the response if valid JSON '''
params = {'url': url, 'token': token, 'timeout': self.TIMEOUT_MS}
try:
response = requests.get(f'{self.BASE_API_URL}/v{version}/{api}', params=params, timeout=self.TIMEOUT_MS)
response.raise_for_status()
except:
raise Exception('REMOTE_ERROR')
return response.json()
# Path: model/prompts.py
BIAS_REPORT = '''Critique the following possibly-biased article unless it is too short.
Instructions:
1. Identify any bias -- especially political bias.
2. If the article is fair, be fair in your critique. If it is biased, be harsh and critical about the issues.
3. Use specific examples and quote directly where possible.
4. Call out any opinion, hyperbole, and speculation.
5. Assess where this article lies on the political spectrum.
6. Write the critique as 3-5 paragraphs separated by two (2) newline characters.
7. If the article is very short or truncated, explain the problem in one paragraph and do not critique it.
### Headline:
{headline}
### Body:
{body}
### Critical Review:
'''
# Path: model/prompts.py
FACTUAL_CLAIMS = u'''Summarize the factual claims made in this article in a bulleted list separated by \u2022 unless it is too short.
Instructions:
1. Order the facts by decreasing importance
2. Use extremely concise, simple language
3. If the article is very short or truncated, request that user elaborate or re-host.
### Headline:
{headline}
### Body:
{body}:
### Factual Claims:
'''
# Path: model/prompts.py
SLANT_DESCRIPTION = '''Describe the slant critiqued in the following Bias Report in 1-2 words. Be creative, pithy, and accurate.
Example slants: Fair, Left-leaning, Extreme Right, Environmentalist, Bitcoin Maximalist, Conservative, Conspiracist, Impartial
### Bias Report:
{bias_report}
### Slant:
'''
# Path: model/agent.py
from datetime import datetime
from threading import Thread
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from .diffbot import DiffbotClient
from .prompts import BIAS_REPORT, FACTUAL_CLAIMS, SLANT_DESCRIPTION
import fcntl
import json
import logging
import multiprocessing
import os
import tiktoken
DIFFBOT_API_KEY = os.environ['DIFFBOT_API_KEY']
REQUEST_LOG_FILE = os.environ['REQUEST_LOG_FILE']
MAX_MODEL_CONTEXT = {
'gpt-3.5-turbo': 4096,
'text-davinci-003': 4096,
'gpt-4': 8192,
}
class OpenAIStreamHandler(BaseCallbackHandler):
def __init__(self, stream_queue, *args, **kwargs):
super(OpenAIStreamHandler, self).__init__(*args, **kwargs)
self.stream_queue = stream_queue
def on_llm_new_token(self, token, *args, **kwargs):
self.stream_queue.put(token)
def on_llm_end(self, *args, **kwargs):
self.stream_queue.put(False)
class Agent(multiprocessing.Process):
def __init__(self, in_queue, out_queue):
super(Agent, self).__init__()
logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO)
self.in_queue = in_queue
self.out_queue = out_queue
self.fact_prompt = PromptTemplate(input_variables=['headline', 'body'], template=FACTUAL_CLAIMS)
self.critique_prompt = PromptTemplate(input_variables=['headline', 'body'], template=BIAS_REPORT)
self.slant_prompt = PromptTemplate(input_variables=['bias_report'], template=SLANT_DESCRIPTION)
gpt35 = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.0, request_timeout=300)
davinci = ChatOpenAI(model_name='text-davinci-003', temperature=0.0, request_timeout=300)
gpt4 = ChatOpenAI(model_name='gpt-4', temperature=0.0, request_timeout=900)
self.stream_queue = multiprocessing.Queue()
gpt4_stream = ChatOpenAI(
model_name='gpt-4',
temperature=0.0,
streaming=True,
callbacks=[OpenAIStreamHandler(stream_queue=self.stream_queue)],
request_timeout=900,
)
self.fact_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.fact_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.fact_prompt),
'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.fact_prompt),
}
self.critique_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.critique_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.critique_prompt),
'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.critique_prompt),
}
self.slant_chains = {
'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.slant_prompt),
'text-davinci-003': LLMChain(llm=davinci, prompt=self.slant_prompt),
'gpt-4': LLMChain(llm=gpt4, prompt=self.slant_prompt),
}
self._load_processed_jobs()
def run(self):
logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO)
| diffbot = DiffbotClient() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: elenacliu/GraspStudio
# Path: cameras/camera.py
class CameraConfig(InstantiateConfig):
"""Camera Config"""
_target: Type = field(default_factory=lambda : Camera)
# focal length of x axis
fx: float = 0.0
# focal length of y axis
fy: float = 0.0
# optical center of x
ppx: float = 0.0
# optical center of y
ppy: float = 0.0
# resolution x (width)
w: int = 0.0
# resolution y (height)
h: int = 0.0
# image size
image_size_w: int = 1280
image_size_h: int = 720
# calibration matrix (camera on hand or camera on base)
calibration: NDArray[np.float64] = None
# depth camera focal length of x axis (optional)
depth_fx: Optional[float] = None
# depth camera focal length of y axis (optional)
depth_fy: Optional[float] = None
# depth camera ppx
depth_ppx: Optional[float] = None
# depth camera ppy
depth_ppy: Optional[float] = None
# depth resolution x (width)
depth_w: Optional[int] = None
# depth resolution y (height)
depth_h: Optional[int] = None
# Path: cameras/camera.py
class Camera:
config: CameraConfig
def __init__(self, config : CameraConfig):
self.config = config
def rgb(self) -> NDArray:
raise NotImplementedError('You should use a specified subclass!')
def rgbd(self) -> Tuple[NDArray, NDArray]:
raise NotImplementedError('You should use a specified subclass!')
def depth_to_point_cloud(self, organized=False) -> Tuple[NDArray, NDArray]:
"""
organized: bool
whether to keep the cloud in image shape (H,W,3)
"""
color_img, depth_img = self.rgbd()
color_img = np.array(color_img, dtype=np.float32) / 255.0
depth_img = np.array(depth_img / 1000, dtype=np.float32)
# depth image resize to the color image size
# just use the original size of depth image and color image
# depth_img = cv2.resize(depth_img, (self.config.image_size_w, self.config.image_size_h), interpolation=cv2.INTER_NEAREST)
# color_img = cv2.resize(color_img, (self.config.image_size_w, self.config.image_size_h), interpolation=cv2.INTER_LINEAR)
# the scale should be considered again
h, w = depth_img.shape
# scale camera parameters
scale_x = w / self.config.depth_w
scale_y = h / self.config.depth_h
fx = self.config.depth_fx * scale_x
fy = self.config.depth_fy * scale_y
x_offset = self.config.depth_ppx * scale_x
y_offset = self.config.depth_ppy * scale_y
indices = torch.from_numpy(np.indices((h, w), dtype=np.float32).transpose(1,2,0))
z_e = torch.from_numpy(depth_img)
x_e = (indices[..., 1] - x_offset) * z_e / fx
y_e = (indices[..., 0] - y_offset) * z_e / fy
point_cloud = torch.stack([x_e, y_e, z_e], axis=-1).numpy() # Shape: [H x W x 3]
if not organized:
color_img = color_img.reshape(-1, 3)
point_cloud = point_cloud.reshape(-1, 3)
return color_img, point_cloud
@property
def intrinsic(self):
return {
'fx': self.config.fx,
'fy': self.config.fy,
'cx': self.config.ppx,
'cy': self.config.ppy,
'w': self.config.w,
'h': self.config.h
}
@property
def depth_intrinsic(self):
return {
'fx': self.config.depth_fx,
'fy': self.config.depth_fy,
'cx': self.config.depth_ppx,
'cy': self.config.depth_ppy,
'w': self.config.depth_w,
'h': self.config.depth_h
}
# Path: cameras/realsense.py
from dataclasses import dataclass, field
from typing import Type
from .camera import CameraConfig, Camera
import pyrealsense2 as rs
import numpy as np
import cv2
# Copyright 2023 Chang Liu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@dataclass
class RealSenseCameraConfig(CameraConfig):
_target: Type = field(default_factory=lambda : RealSenseCamera)
exposure: float = 500.0
max_depth_value: float = 800.0
| class RealSenseCamera(Camera): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: serl-robot/serl
# Path: serl/wrappers/frame_stack.py
class FrameStack(gym.Wrapper):
def __init__(self, env, num_stack: int, stacking_key: str = "pixels"):
super().__init__(env)
self._num_stack = num_stack
self._stacking_key = stacking_key
for key in stacking_key:
assert key in self.observation_space.spaces
pixel_obs_spaces = self.observation_space.spaces[key]
self._env_dim = pixel_obs_spaces.shape[-1]
low = np.repeat(pixel_obs_spaces.low[..., np.newaxis], num_stack, axis=-1)
high = np.repeat(pixel_obs_spaces.high[..., np.newaxis], num_stack, axis=-1)
new_pixel_obs_spaces = Box(low=low, high=high, dtype=pixel_obs_spaces.dtype)
self.observation_space.spaces[key] = new_pixel_obs_spaces
self._frames = collections.deque(maxlen=num_stack)
def reset(self):
obs, info = self.env.reset()
for i in range(self._num_stack):
self._frames.append({key: obs[key] for key in self._stacking_key})
for k in self._stacking_key:
obs[k] = self.frames[k]
return obs, info
@property
def frames(self):
tmp = {}
for k in self._stacking_key:
tmp[k] = np.stack([frame[k] for frame in self._frames], axis=-1)
return tmp
def step(self, action):
obs, reward, done, truncated, info = self.env.step(action)
self._frames.append({k: obs[k] for k in self._stacking_key})
for k in self._stacking_key:
obs[k] = self.frames[k]
return obs, reward, done, truncated, info
# Path: serl/wrappers/repeat_action.py
class RepeatAction(gym.Wrapper):
def __init__(self, env, action_repeat=4):
super().__init__(env)
self._action_repeat = action_repeat
def step(self, action: np.ndarray):
total_reward = 0.0
done = None
combined_info = {}
for _ in range(self._action_repeat):
obs, reward, done, info = self.env.step(action)
total_reward += reward
combined_info.update(info)
if done:
break
return obs, total_reward, done, combined_info
# Path: serl/wrappers/universal_seed.py
class UniversalSeed(gym.Wrapper):
def seed(self, seed: int):
seeds = self.env.seed(seed)
self.env.observation_space.seed(seed)
self.env.action_space.seed(seed)
return seeds
# Path: serl/wrappers/pixels.py
from typing import Optional, Tuple
from gym.wrappers.pixel_observation import PixelObservationWrapper
from serl.wrappers.frame_stack import FrameStack
from serl.wrappers.repeat_action import RepeatAction
from serl.wrappers.universal_seed import UniversalSeed
import gym
def wrap_pixels(
env: gym.Env,
action_repeat: int,
image_size: int = 84,
num_stack: Optional[int] = 3,
camera_id: int = 0,
pixel_keys: Tuple[str, ...] = ("pixels",),
) -> gym.Env:
if action_repeat > 1:
env = RepeatAction(env, action_repeat)
| env = UniversalSeed(env) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: daily-demos/ai-meeting-assistant
# Path: server/llm/assistant.py
class Assistant(ABC):
"""Abstract class defining methods that should be implemented by any assistant"""
@abstractmethod
def register_new_context(self, new_text: str,
name: list[str] = None) -> str:
"""Registers new context (usually a transcription line)."""
@abstractmethod
async def query(self, custom_query: str) -> str:
"""Runs a query against the assistant and returns the answer."""
@abstractmethod
def get_clean_transcript(self) -> str:
"""Returns latest clean transcript."""
@abstractmethod
async def cleanup_transcript(self) -> str:
"""Cleans up transcript from raw context."""
@abstractmethod
def destroy(self) -> str:
"""Destroys the assistant."""
# Path: server/llm/assistant.py
class NoContextError(Exception):
"""Raised when a query is made but no context is available"""
def __init__(self):
m = "No context available."
super().__init__(m)
# Path: server/llm/openai_assistant.py
import asyncio
import logging
import threading
from collections import deque
from openai import OpenAI
from openai.types.beta import Assistant
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionSystemMessageParam, \
ChatCompletionUserMessageParam
from server.llm.assistant import Assistant, NoContextError
def probe_api_key(api_key: str) -> bool:
"""Probes the OpenAI API with the provided key to ensure it is valid."""
try:
client = OpenAI(api_key=api_key)
client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
ChatCompletionUserMessageParam(
content="This is a test",
role="user")],
)
return True
except Exception as e:
print(f"Failed to probe OpenAI API key: {e}")
return False
class OpenAIAssistant(Assistant):
"""Class that implements assistant features using the OpenAI API"""
_client: OpenAI = None
_oai_assistant_id: int = None
_oai_summary_thread_id: int = None
_model_name: str = None
_logger: logging.Logger = None
# For now, just store context in memory.
_raw_context: deque([ChatCompletionMessageParam]) = None
_clean_transcript: str = None
_clean_transcript_running: bool = False
_summary_context: str = None
# Process 20 context items at a time.
_transcript_batch_size: int = 25
_default_transcript_prompt = ChatCompletionSystemMessageParam(content="""
Using the exact transcript provided in the previous messages, convert it into a cleaned-up, paragraphed format. It is crucial that you strictly adhere to the content of the provided transcript without adding or modifying any of the original dialogue. Your tasks are to:
1. Correct punctuation and spelling mistakes.
2. Merge broken sentences into complete ones.
3. Remove timestamps and transcript types.
4. Clearly indicate the speaker's name at the beginning of their dialogue.
Do not add any new content or dialogue that was not present in the original transcript. The focus is on cleaning and reformatting the existing content for clarity and readability.
""",
role="system")
_default_prompt = """
Primary Instruction:
Based on the provided meeting transcripts, please create a concise summary.
Your summary should include:
1. Key discussion points.
2. Decisions made.
3. Action items assigned.
Keep the summary within six sentences, ensuring it captures the essence of the conversation. Structure it in clear, digestible parts for easy understanding. Rely solely on information from the transcript; do not infer or add information not explicitly mentioned. Exclude any square brackets, tags, or timestamps from the summary. Instead of re-parsing the entire context, use previous summaries you've generated to inform the completion of each new summary. Each summary should be holistic and represent the entire call.
"""
def __init__(self, api_key: str, model_name: str = None,
logger: logging.Logger = None):
if not api_key:
raise Exception("OpenAI API key not provided, but required.")
self._raw_context = deque()
self._summary_context = ""
self._clean_transcript = ""
self._logger = logger
if not model_name:
model_name = "gpt-4-1106-preview"
self._model_name = model_name
self._client = OpenAI(
api_key=api_key,
)
self._oai_assistant_id = self.get_or_create_assistant(model_name)
def get_or_create_assistant(self, model_name) -> str:
"""Gets or creates an OpenAI assistant"""
all_assistants = self._client.beta.assistants.list()
for assistant in all_assistants.data:
if assistant.name == _assistant_name and assistant.instructions == self._default_prompt:
return assistant.id
return self._client.beta.assistants.create(name=_assistant_name, description="Daily meeting summary assistant",
instructions=self._default_prompt,
model=model_name).id
def destroy(self):
"""Destroys the assistant and relevant resources"""
self._logger.info(
"Destroying thread (%s) and assistant (%s)",
self._oai_summary_thread_id,
self._oai_assistant_id)
bc = self._client.beta
if self._oai_summary_thread_id:
bc.threads.delete(self._oai_summary_thread_id)
if self._oai_assistant_id:
bc.assistants.delete(self._oai_assistant_id)
def register_new_context(self, new_text: str, metadata: list[str] = None):
"""Registers new context (usually a transcription line)."""
content = self._compile_ctx_content(new_text, metadata)
user_msg = ChatCompletionUserMessageParam(content=content, role="user")
self._raw_context.append(user_msg)
def get_clean_transcript(self) -> str:
"""Returns latest clean transcript."""
return self._clean_transcript
async def cleanup_transcript(self) -> str:
"""Cleans up transcript from raw context."""
if self._clean_transcript_running:
raise Exception("Clean transcript process already running")
# Set this bool to ensure only one cleanup process
# is running at a time.
self._clean_transcript_running = True
if len(self._raw_context) == 0:
self._clean_transcript_running = False
| raise NoContextError() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Kushalhk/AutoFilter
# Path: database/ia_filterdb.py
async def get_search_results(chat_id, query, file_type=None, max_results=10, offset=0, filter=False):
"""For given query return (results, next_offset)"""
if chat_id is not None:
settings = await get_settings(int(chat_id))
try:
if settings['max_btn']:
max_results = 10
else:
max_results = int(MAX_B_TN)
except KeyError:
await save_group_settings(int(chat_id), 'max_btn', False)
settings = await get_settings(int(chat_id))
if settings['max_btn']:
max_results = 10
else:
max_results = int(MAX_B_TN)
query = query.strip()
#if filter:
#better ?
#query = query.replace(' ', r'(\s|\.|\+|\-|_)')
#raw_pattern = r'(\s|_|\-|\.|\+)' + query + r'(\s|_|\-|\.|\+)'
if not query:
raw_pattern = '.'
elif ' ' not in query:
raw_pattern = r'(\b|[\.\+\-_])' + query + r'(\b|[\.\+\-_])'
else:
raw_pattern = query.replace(' ', r'.*[\s\.\+\-_]')
try:
regex = re.compile(raw_pattern, flags=re.IGNORECASE)
except:
return []
if USE_CAPTION_FILTER:
filter = {'$or': [{'file_name': regex}, {'caption': regex}]}
else:
filter = {'file_name': regex}
if file_type:
filter['file_type'] = file_type
total_results = await Media.count_documents(filter)
next_offset = offset + max_results
if next_offset > total_results:
next_offset = ''
cursor = Media.find(filter)
# Sort by recent
cursor.sort('$natural', -1)
# Slice files according to offset and max results
cursor.skip(offset).limit(max_results)
# Get list of files
files = await cursor.to_list(length=max_results)
return files, next_offset, total_results
# Path: utils.py
async def is_subscribed(bot, query):
try:
user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
except UserNotParticipant:
pass
except Exception as e:
logger.exception(e)
else:
if user.status != enums.ChatMemberStatus.BANNED:
return True
return False
# Path: utils.py
def get_size(size):
"""Get size in readable format"""
units = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB"]
size = float(size)
i = 0
while size >= 1024.0 and i < len(units):
i += 1
size /= 1024.0
return "%.2f %s" % (size, units[i])
# Path: utils.py
class temp(object):
BANNED_USERS = []
BANNED_CHATS = []
ME = None
CURRENT=int(os.environ.get("SKIP", 2))
CANCEL = False
MELCOW = {}
U_NAME = None
B_NAME = None
GETALL = {}
SHORT = {}
SETTINGS = {}
# Path: info.py
CACHE_TIME = int(environ.get('CACHE_TIME', 99999))
# Path: info.py
AUTH_USERS = (auth_users + ADMINS) if auth_users else []
# Path: info.py
AUTH_CHANNEL = int(auth_channel) if auth_channel and id_pattern.search(auth_channel) else None
# Path: info.py
CUSTOM_FILE_CAPTION = environ.get("CUSTOM_FILE_CAPTION", f"{script.CAPTION}")
# Path: database/connections_mdb.py
async def active_connection(user_id):
query = mycol.find_one(
{ "_id": user_id },
{ "_id": 0, "group_details": 0 }
)
if not query:
return None
group_id = query['active_group']
return int(group_id) if group_id != None else None
# Path: plugins/inline.py
import logging
from pyrogram import Client, emoji, filters
from pyrogram.errors.exceptions.bad_request_400 import QueryIdInvalid
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultCachedDocument, InlineQuery
from database.ia_filterdb import get_search_results
from utils import is_subscribed, get_size, temp
from info import CACHE_TIME, AUTH_USERS, AUTH_CHANNEL, CUSTOM_FILE_CAPTION
from database.connections_mdb import active_connection
logger = logging.getLogger(__name__)
cache_time = 0 if AUTH_USERS or AUTH_CHANNEL else CACHE_TIME
async def inline_users(query: InlineQuery):
if AUTH_USERS:
if query.from_user and query.from_user.id in AUTH_USERS:
return True
else:
return False
if query.from_user and query.from_user.id not in temp.BANNED_USERS:
return True
return False
@Client.on_inline_query()
async def answer(bot, query):
"""Show search results for given inline query"""
chat_id = await active_connection(str(query.from_user.id))
if not await inline_users(query):
await query.answer(results=[],
cache_time=0,
switch_pm_text='okDa',
switch_pm_parameter="hehe")
return
if AUTH_CHANNEL and not await is_subscribed(bot, query):
await query.answer(results=[],
cache_time=0,
switch_pm_text='You have to subscribe my channel to use the bot',
switch_pm_parameter="subscribe")
return
results = []
if '|' in query.query:
string, file_type = query.query.split('|', maxsplit=1)
string = string.strip()
file_type = file_type.strip().lower()
else:
string = query.query.strip()
file_type = None
offset = int(query.offset or 0)
reply_markup = get_reply_markup(query=string)
files, next_offset, total = await get_search_results(
chat_id,
string,
file_type=file_type,
max_results=10,
offset=offset)
for file in files:
title=file.file_name
size=get_size(file.file_size)
f_caption=file.caption
| if CUSTOM_FILE_CAPTION: |
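The heart of get_search_results above is the query-to-regex step. A stand-alone sketch of just that step (illustrative only; the MongoDB lookup, settings, and MAX_B_TN handling are omitted, and build_search_pattern is a name introduced here, not from the repository):
import re

def build_search_pattern(query: str) -> re.Pattern:
    # Single words match on word / dot / plus / dash / underscore boundaries;
    # multi-word queries allow arbitrary separators between the words.
    query = query.strip()
    if not query:
        raw_pattern = '.'
    elif ' ' not in query:
        raw_pattern = r'(\b|[\.\+\-_])' + query + r'(\b|[\.\+\-_])'
    else:
        raw_pattern = query.replace(' ', r'.*[\s\.\+\-_]')
    return re.compile(raw_pattern, flags=re.IGNORECASE)

print(bool(build_search_pattern("iron man").search("Iron.Man.2008.1080p.mkv")))  # True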
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tiendatnguyen-vision/Orbit-symmetrize
# Path: RotatedMNIST/LPS/emlp-pytorch/tests/equivariance_tests.py
def rel_error(t1, t2):
""" Computes the relative error of two tensors. """
error = torch.sqrt(torch.mean(torch.abs(t1-t2)**2))
scale = torch.sqrt(torch.mean(torch.abs(t1)**2)) + \
torch.sqrt(torch.mean(torch.abs(t2)**2))
return error/torch.clamp(scale, min=1e-7)
# Path: RotatedMNIST/LPS/emlp-pytorch/tests/equivariance_tests.py
def scale_adjusted_rel_error(t1, t2, g):
""" Computes the relative error of two tensors t1 and t2 under the action of g. """
error = torch.sqrt(torch.mean(torch.abs(t1-t2)**2))
tscale = torch.sqrt(torch.mean(torch.abs(t1)**2)) + \
torch.sqrt(torch.mean(torch.abs(t2)**2))
gscale = torch.sqrt(torch.mean(torch.abs(g-torch.eye(g.size(-1), device=t1.device))**2))
scale = torch.max(tscale, gscale)
return error/torch.clamp(scale, min=1e-7)
# Path: RotatedMNIST/LPS/emlp-pytorch/tests/model_tests.py
import torch
from torch.utils.data import DataLoader
from oil.utils.utils import FixedNumpySeed, FixedPytorchSeed
from emlp_pytorch.nn import EMLP
from emlp_pytorch.groups import S, SO, DirectProduct
from emlp_pytorch.reps import vis, sparsify_basis, V, Rep, LazyKron, T
from .equivariance_tests import rel_error, scale_adjusted_rel_error
""" Tests for the EMLP model."""
def equivariance_err(model, mb, repin, repout, group):
""" Computes the equivariance error of a model on a minibatch mb. """
x, y = mb
gs = group.samples(x.size(0))
rho_gin = torch.vmap(repin(group).rho_dense)(gs)
rho_gout = torch.vmap(repout(group).rho_dense)(gs)
y1 = model((rho_gin@x[..., None])[..., 0])
y2 = (rho_gout@model(x)[..., None])[..., 0]
return scale_adjusted_rel_error(y1, y2, gs)
def get_dsmb(dsclass, device='cpu'):
""" Returns a dataset and minibatch for a given dataset class. """
seed = 2021
bs = 50
with FixedNumpySeed(seed), FixedPytorchSeed(seed):
ds = dsclass(100)
ds = ds.to(device)
dataloader = DataLoader(ds, batch_size=min(bs, len(ds)), num_workers=0, pin_memory=False)
mb = next(iter(dataloader))
return ds, mb
def test_init_forward_and_equivariance(dsclass, device='cpu'):
""" Tests that the model can be initialized, forward pass is correct,
and equivariance is correct. """
network = EMLP
ds, mb = get_dsmb(dsclass, device)
model = network(ds.rep_in, ds.rep_out, group=ds.symmetry).to(device)
assert equivariance_err(model, mb, ds.rep_in, ds.rep_out, ds.symmetry) < 1e-4, \
"EMLP failed equivariance test"
def test_utilities(device='cpu'):
""" Tests that the utilities work. """
W = V(SO(3).to(device))
# W = V(DirectProduct(SO(3).to(device), S(6).to(device)))
vis(W, W)
Q = (W**2 >> W).equivariant_basis()
SQ = sparsify_basis(Q)
A = SQ@(1+torch.arange(SQ.size(-1), device=device)).to(torch.float)
nunique = len(torch.unique(torch.abs(A)))
assert nunique in (SQ.size(-1), SQ.size(-1) + 1), "Sparsify fails on SO(3) T3"
def test_bespoke_representations(device='cpu'):
""" Tests that bespoke representations work. """
class ProductSubRep(Rep):
""" A representation of a product group G = G1 x G2 as a sum of two subrepresentations """
def __init__(self, G, subgroup_id, size):
""" Produces the representation of the subgroup of G = G1 x G2
with the index subgroup_id in {0,1} specifying G1 or G2.
Also requires specifying the size of the representation given by G1.d or G2.d """
super().__init__()
self.G = G
self.index = subgroup_id
self._size = size
self.device = device
def __repr__(self):
return "V_"+str(self.G).split('x')[self.index]
def __hash__(self):
return hash((type(self), (self.G, self.index)))
def size(self):
return self._size
def rho(self, M):
# Given that M is a LazyKron object, we can just get the argument
return M.Ms[self.index]
def drho(self, A):
return A.Ms[self.index]
def __call__(self, G):
# adding this will probably not be necessary in a future release,
# necessary now because rep is __call__ed in nn.EMLP constructor
assert self.G == G
return self
G1, G2 = SO(3).to(device), S(5).to(device)
G = G1 * G2
VSO3 = ProductSubRep(G, 0, G1.d)
VS5 = ProductSubRep(G, 1, G2.d)
Vin = VS5 + V(G)
Vout = VSO3
str(Vin >> Vout)
model = EMLP(Vin, Vout, group=G)
model.to(device)
input_point = torch.randn(Vin.size(), device=device)*10
lazy_G_sample = LazyKron([G1.sample(), G2.sample()])
out1 = model(Vin.rho(lazy_G_sample)@input_point)
out2 = Vout.rho(lazy_G_sample)@model(input_point)
| assert rel_error(out1, out2) < 1e-4, "EMLP equivariance fails on bespoke productsubrep" |
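For a quick numeric feel for the rel_error helper used throughout these tests, a tiny self-contained check (torch only, values made up here):
import torch

def rel_error(t1, t2):
    # same formula as in tests/equivariance_tests.py above
    error = torch.sqrt(torch.mean(torch.abs(t1 - t2) ** 2))
    scale = torch.sqrt(torch.mean(torch.abs(t1) ** 2)) + torch.sqrt(torch.mean(torch.abs(t2) ** 2))
    return error / torch.clamp(scale, min=1e-7)

a = torch.tensor([1.0, 2.0, 3.0])
print(rel_error(a, a).item())         # 0.0 for identical tensors
print(rel_error(a, a + 0.01).item())  # roughly 2.3e-3 for a small uniform offset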
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: crizbae/PictoPlan
# Path: backend/mongo_api/app/server/database.py
MONGO_URI = config("MONGO_URI")
def item_helper(item) -> dict:
def ret_link(item) -> dict:
async def retrieve_all_items():
async def retrieve_item(item_id: str):
async def retrieve_links(session_id: str):
async def update_item_in_db(item_id: str, updated_item: dict) -> bool:
async def delete_item_from_db(item_id: str) -> bool:
# Path: backend/mongo_api/app/server/models/item.py
class Item(BaseModel):
Title: str
SessionId: str
Objective: str
Materials: str
Procedure: Dict[str, str]
Assessment: str
# Path: backend/mongo_api/app/server/database.py
async def retrieve_all_items():
items = []
cursor = collection.find()
for item in cursor:
items.append(item_helper(item))
return items
# Path: backend/mongo_api/app/server/database.py
async def retrieve_item(item_id: str):
cursor = collection.find_one({"_id": ObjectId(item_id)})
return [ret_link(cursor)]
# Path: backend/mongo_api/app/server/database.py
async def update_item_in_db(item_id: str, updated_item: dict) -> bool:
cursor = collection.update_one({"_id": ObjectId(item_id)}, {"$set": updated_item})
return cursor.modified_count > 0
# Path: backend/mongo_api/app/server/database.py
async def delete_item_from_db(item_id: str) -> bool:
cursor = collection.delete_one({"_id": ObjectId(item_id)})
return cursor.deleted_count > 0
# Path: backend/mongo_api/app/server/database.py
async def retrieve_links(session_id: str):
cursor = collection.find({"SessionId": session_id})
links = []
for item in cursor:
links.append(str(item["_id"]))
return links
# Path: backend/mongo_api/app/server/routes/item_routes.py
from fastapi import APIRouter, Depends, HTTPException
from ..database import collection
from ..models.item import Item
from ..database import retrieve_all_items, retrieve_item, update_item_in_db, delete_item_from_db, retrieve_links
router = APIRouter()
@router.post("/items/")
def create_item(item: Item):
item_dict = item.dict()
inserted_item = collection.insert_one(item_dict)
item_id = str(inserted_item.inserted_id)
del item_dict["_id"]
item_dict["id"] = item_id
return item_dict
@router.get("/items/")
async def get_all_items():
items = await retrieve_all_items()
return items
# get by frontend UUID
@router.get("/items/session/{session_id}")
async def get_item_by_session_id(session_id: str):
item = await retrieve_links(session_id)
if len(item) == 0:
raise HTTPException(status_code=404, detail="Items not found")
return item
# get by link
@router.get("/items/{item_id}")
async def get_item_by_id(item_id: str):
item = await retrieve_item(item_id)
if len(item) == 0:
raise HTTPException(status_code=404, detail="Item not found")
return item
@router.put("/items/{item_id}")
async def update_item(item_id: str, updated_item: Item):
updated_item = updated_item.dict()
success = await update_item_in_db(item_id, updated_item)
if not success:
raise HTTPException(status_code=404, detail="Item not found")
return {**updated_item, "id": item_id}
@router.delete("/items/{item_id}")
async def delete_item(item_id: str):
| success = await delete_item_from_db(item_id) |
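The delete-or-404 pattern in the route above can be prototyped without MongoDB; a minimal self-contained FastAPI sketch of the same shape (in-memory dict, names illustrative, not from the repository):
from fastapi import FastAPI, HTTPException

app = FastAPI()
fake_db: dict[str, dict] = {"abc": {"Title": "Demo"}}

@app.delete("/items/{item_id}")
async def delete_item(item_id: str):
    # pop() returns None when the id is unknown, mirroring the "not success" branch
    if fake_db.pop(item_id, None) is None:
        raise HTTPException(status_code=404, detail="Item not found")
    return {"detail": "Item deleted"}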
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xenxxxx/BitPay-Crypto-Signal-Trading-Bot
# Path: tests/conftest.py
CURRENT_TEST_STRATEGY = 'StrategyTestV3'
# Path: tests/conftest.py
def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):
"""
Create some fake trades ...
:param is_short: Optional bool, None creates a mix of long and short trades.
"""
def add_trade(trade):
if use_db:
Trade.session.add(trade)
else:
LocalTrade.add_bt_trade(trade)
is_short1 = is_short if is_short is not None else True
is_short2 = is_short if is_short is not None else False
# Simulate dry_run entries
trade = mock_trade_1(fee, is_short1)
add_trade(trade)
trade = mock_trade_2(fee, is_short1)
add_trade(trade)
trade = mock_trade_3(fee, is_short2)
add_trade(trade)
trade = mock_trade_4(fee, is_short2)
add_trade(trade)
trade = mock_trade_5(fee, is_short2)
add_trade(trade)
trade = mock_trade_6(fee, is_short1)
add_trade(trade)
if use_db:
Trade.commit()
# Path: tests/conftest_trades.py
MOCK_TRADE_COUNT = 6
# Path: tests/data/test_btanalysis.py
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock
from pandas import DataFrame, DateOffset, Timestamp, to_datetime
from freqtrade.configuration import TimeRange
from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism,
extract_trades_of_period, get_latest_backtest_filename,
get_latest_hyperopt_file, load_backtest_data,
load_backtest_metadata, load_trades, load_trades_from_db)
from freqtrade.data.history import load_data, load_pair_history
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
calculate_expectancy, calculate_market_change,
calculate_max_drawdown, calculate_sharpe, calculate_sortino,
calculate_underwater, combine_dataframes_with_mean,
create_cum_profit)
from freqtrade.exceptions import OperationalException
from freqtrade.util import dt_utc
from tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades
from tests.conftest_trades import MOCK_TRADE_COUNT
import pytest
def test_get_latest_backtest_filename(testdatadir, mocker):
with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
get_latest_backtest_filename(testdatadir / 'does_not_exist')
with pytest.raises(ValueError,
match=r"Directory .* does not seem to contain .*"):
get_latest_backtest_filename(testdatadir)
testdir_bt = testdatadir / "backtest_results"
res = get_latest_backtest_filename(testdir_bt)
assert res == 'backtest-result.json'
res = get_latest_backtest_filename(str(testdir_bt))
assert res == 'backtest-result.json'
mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})
with pytest.raises(ValueError, match=r"Invalid '.last_result.json' format."):
get_latest_backtest_filename(testdir_bt)
def test_get_latest_hyperopt_file(testdatadir):
res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
assert res == testdatadir / 'does_not_exist/testfile.pickle'
res = get_latest_hyperopt_file(testdatadir.parent)
assert res == testdatadir.parent / "hyperopt_results.pickle"
res = get_latest_hyperopt_file(str(testdatadir.parent))
assert res == testdatadir.parent / "hyperopt_results.pickle"
# Test with absolute path
with pytest.raises(
OperationalException,
match="--hyperopt-filename expects only the filename, not an absolute path."):
get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))
def test_load_backtest_metadata(mocker, testdatadir):
res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
assert res == {}
mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
with pytest.raises(OperationalException,
match=r"Unexpected error.*loading backtest metadata\."):
load_backtest_metadata(testdatadir / 'nonexistant.file.json')
def test_load_backtest_data_old_format(testdatadir, mocker):
filename = testdatadir / "backtest-result_test222.json"
mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])
with pytest.raises(OperationalException,
match=r"Backtest-results with only trades data are no longer supported."):
load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename))
assert bt_data.equals(bt_data2)
# Test loading from folder (must yield same result)
bt_data3 = load_backtest_data(testdatadir / "backtest_results")
assert bt_data.equals(bt_data3)
with pytest.raises(ValueError, match=r"File .* does not exist\."):
load_backtest_data("filename" + "nofile")
with pytest.raises(ValueError, match=r"Unknown dataformat."):
load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename), strategy=strategy)
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
| create_mock_trades(fee, is_short) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ssajedi/SAiF-GPT
# Path: utils.py
def augment_prompt(prompt,ref_doc):
def extract_pdf_text(file):
# Path: utils.py
def extract_pdf_text(file):
"""
Extracts text paragraphs from a PDF file.
"""
pdf_reader = PyPDF2.PdfReader(file)
pdf_dict={}
for ip in range(len(pdf_reader.pages)):
pdf_dict[ip] = pdf_reader.pages[ip].extract_text()
dataset = [pdf_dict[ip] for ip in range(len(pdf_reader.pages))]
return pdf_dict,dataset
# Path: text_effects.py
def highlight_phrases_in_paragraph(paragraph, phrases_to_colors):
"""
Highlights specific phrases within a paragraph in Streamlit markdown using generated pale colors and rounded edges.
Args:
- paragraph (str): The paragraph of text where phrases will be highlighted.
- phrases_to_colors (dict): Dictionary where keys are phrases to be highlighted. Colors will be generated automatically.
Returns:
- str: The paragraph with highlight markup applied (direct rendering via st.markdown is left commented out below).
"""
# Filter out phrases that don't exist in the paragraph
phrases_present = {phrase: color for phrase, color in phrases_to_colors.items() if re.search(re.escape(phrase), paragraph, re.IGNORECASE)}
# Sort phrases by length in descending order to handle nested phrases
phrases_sorted = sorted(phrases_present.keys(), key=len, reverse=True)
# Initialize a hue value
hue = 0
hue_increment = 1 / len(phrases_sorted) if phrases_sorted else 0 # Prevent division by zero
# Escape phrases for regex and replace them with highlighted HTML
for phrase in phrases_sorted:
color_code = generate_pale_color(hue)
hue += hue_increment # Increment hue to get a different color
escaped_phrase = re.escape(phrase)
pattern = r'\b' + escaped_phrase + r'\b' # Use word boundaries
replacement = (
f'<span style="background-color: {color_code}; '
f'border-radius: 0.5em; padding: 0.3em 0.6em;">{phrase}🔒</span>'
)
paragraph = re.sub(pattern, replacement, paragraph, flags=re.IGNORECASE)
# Render the HTML in Streamlit using the markdown function with unsafe_allow_html set to True
# st.markdown(paragraph, unsafe_allow_html=True)
return paragraph
# Path: bin/main.py
import streamlit as st
import random
import time
import openai
import openai
import streamlit as st
from utils import anonymize_text, deanonymize_text, chatbot_response
from utils import extract_pdf_text
from text_effects import highlight_phrases_in_paragraph
from DetectEntity import DetectEntity
st.title("AInonymous")
system_prompt="""You are a helpful assistant, your task is to review a document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""
# add a selectbox to the sidebar
st.sidebar.multiselect("Entity list", ["email", "phone",'location'], ["email", "phone","location"])
# add a clear button to the sidebar
if st.sidebar.button("Clear"):
st.session_state.chat_hist = []
st.session_state.messages = []
st.session_state.cls = None
# add
# add a n upload pdf button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)
if uploaded_file is not None:
| _,chunks = extract_pdf_text(uploaded_file) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: awslabs/optimizing-multitask-training-through-dynamic-pipelines
# Path: dynapipe/pipe/data_loader.py
def _get_from_shared_kv_store(
kv_store: RedisKVStore,
key: str,
reader_idx: int,
n_total_readers: int,
decode: bool = True,
logger=None,
):
reader_count_key = key + "_rc"
reader_ack_key = key + "_r{}_ack".format(reader_idx)
# wait for reader ack
if logger is not None:
logger.debug("Waiting for reader ack key: {}".format(reader_ack_key))
kv_store.get(reader_ack_key)
if logger is not None:
logger.debug(
"Got reader ack key: {}, waiting for data key: {}".format(
reader_ack_key, key
)
)
data = kv_store.get(key)
if logger is not None:
logger.debug("Removing reader ack key: {}".format(reader_ack_key))
# remove reader ack
_checked_delete_key(kv_store, reader_ack_key, logger=logger)
# get reader count
reader_count = kv_store.add(reader_count_key, 1)
if reader_count == n_total_readers:
if logger is not None:
logger.debug(
"Last reader, reset reader count: {}".format(reader_count_key)
)
# reset reader count
result_readers = kv_store.add(reader_count_key, -n_total_readers)
assert result_readers == 0
if logger is not None:
logger.debug("Last reader, remove data key: {}".format(key))
# remove data key
_checked_delete_key(kv_store, key, logger=logger)
if logger is not None:
logger.debug("Last reader, set ack key: {}".format(key + "_ack"))
# set all reader ack keys
keys_to_reset = [
key + "_r{}_ack".format(i) for i in range(n_total_readers)
]
if logger is not None:
logger.debug("Last reader, reset keys: {}".format(keys_to_reset))
for reset_key in keys_to_reset:
val = kv_store.add(reset_key, 1)
# make sure the key is set
got_val = int(kv_store.get(reset_key).decode())
if not val == got_val:
raise RuntimeError(
"Failed to set reader ack key: {}".format(reset_key)
)
if logger is not None:
logger.debug("Set reader ack key: {}".format(reset_key))
# set data ack key
kv_store.add(key + "_ack", 1)
if decode:
return data.decode()
return data
# Path: dynapipe/pipe/data_loader.py
def _init_kv_store(is_master, logger=None):
host = os.environ.get("DYNAPIPE_KV_HOST", "localhost")
port = os.environ.get("DYNAPIPE_KV_PORT", 29500)
if logger is not None:
logger.debug(
"Init kv store, is_master: {}, host: {}, port: {}".format(
is_master, host, port
)
)
# kv_store = torch.distributed.TCPStore(
# "127.0.0.1",
# port,
# is_master=is_master,
# timeout=timedelta(seconds=KVSTORE_TIMEOUT),
# )
kv_store = RedisKVStore(host, port, is_master=is_master)
return kv_store, host, port
# Path: dynapipe/pipe/data_loader.py
def _put_to_shared_kv_store(
kv_store: RedisKVStore, key: str, data, logger=None
):
# put execution plan into local kv store
ack_key = key + "_ack"
if logger is not None:
logger.debug("Wait for data ack key: {}".format(ack_key))
# wait for ack key
kv_store.get(ack_key)
# remove ack key
_checked_delete_key(kv_store, ack_key, logger=logger)
if logger is not None:
logger.debug("Set data key: {}".format(key))
# set data key
kv_store.set(key, data)
# Path: tests/test_kv_store.py
import multiprocessing as mp
import time
import traceback
import traceback
from dynapipe.pipe.data_loader import (
_get_from_shared_kv_store,
_init_kv_store,
_put_to_shared_kv_store,
)
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Note: this test requires torch
# to run this test, exec:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=1 test_kv_store.py
def _producer_process(max_iters, buffer_size=32):
try:
kv_store, _, _ = _init_kv_store(is_master=True)
# set all ack keys
for i in range(buffer_size):
kv_store.set(f"key_{i}_ack", "1")
kv_store.set(f"key_{i}_r0_ack", "1")
for i in range(max_iters):
key = "key_{}".format(i % buffer_size)
payload = str(i)
_put_to_shared_kv_store(kv_store, key, payload)
print("[producer] put key: {}".format(key), flush=True)
time.sleep(2)
except Exception as e:
traceback.print_exc()
raise e
def _consumer_process(max_iters, buffer_size=32):
try:
kv_store, _, _ = _init_kv_store(is_master=False)
for i in range(max_iters):
key = "key_{}".format(i % buffer_size)
| payload = _get_from_shared_kv_store( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dask-contrib/dask-databricks
# Path: dask_databricks/databrickscluster.py
class DatabricksCluster(Cluster):
"""Connect to a Dask cluster deployed via databricks."""
def __init__(
self,
loop: Optional[IOLoop] = None,
asynchronous: bool = False,
):
self.spark_local_ip = os.getenv("SPARK_LOCAL_IP")
if self.spark_local_ip is None:
raise KeyError(
"Unable to find expected environment variable SPARK_LOCAL_IP. "
"Are you running this on a Databricks driver node?"
)
try:
name = spark.conf.get("spark.databricks.clusterUsageTags.clusterId")
except AttributeError:
name = "unknown-databricks-" + uuid.uuid4().hex[:10]
super().__init__(name=name, loop=loop, asynchronous=asynchronous)
if not self.called_from_running_loop:
self._loop_runner.start()
self.sync(self._start)
async def _start(self):
self.scheduler_comm = rpc(f"{self.spark_local_ip}:8786")
await super()._start()
@property
def dashboard_link(self):
cluster_id = spark.conf.get("spark.databricks.clusterUsageTags.clusterId")
org_id = spark.conf.get("spark.databricks.clusterUsageTags.orgId")
return f"https://dbc-dp-{org_id}.cloud.databricks.com/driver-proxy/o/{org_id}/{cluster_id}/8087/status"
# Path: dask_databricks/databrickscluster.py
def get_client():
"""Get a Dask client connected to a Databricks cluster."""
return DatabricksCluster().get_client()
# Path: dask_databricks/tests/test_databricks.py
import os
import pytest
from dask.distributed import Client
from distributed.deploy import Cluster, LocalCluster
from dask_databricks import DatabricksCluster, get_client
@pytest.fixture(scope="session")
def dask_cluster():
"""Start a LocalCluster to simulate the cluster that would be started on Databricks."""
return LocalCluster(scheduler_port=8786)
@pytest.fixture
def remove_spark_local_ip():
original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
if original_spark_local_ip:
del os.environ["SPARK_LOCAL_IP"]
yield None
if original_spark_local_ip:
os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
@pytest.fixture
def set_spark_local_ip():
original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
yield None
if original_spark_local_ip:
os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
else:
del os.environ["SPARK_LOCAL_IP"]
def test_databricks_cluster_raises_key_error_when_initialised_outside_of_databricks(remove_spark_local_ip):
with pytest.raises(KeyError):
| DatabricksCluster() |
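The env-var juggling done by hand in remove_spark_local_ip above can also be expressed with pytest's built-in monkeypatch fixture; a sketch of that alternative (not how the repository does it, test name illustrative):
import pytest
from dask_databricks import DatabricksCluster

def test_raises_without_spark_local_ip(monkeypatch):
    monkeypatch.delenv("SPARK_LOCAL_IP", raising=False)  # restored automatically after the test
    with pytest.raises(KeyError):
        DatabricksCluster()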
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: indiefan/king_smith
# Path: custom_components/king_smith/const.py
DOMAIN = "king_smith"
# Path: custom_components/king_smith/walking_pad.py
class WalkingPadApi:
"""Walkingpad device."""
def __init__(self, name: str, ble_device: BLEDevice) -> None:
"""Create a new walking pad api instance."""
self._name = name
self._ble_device = ble_device
self._ctrl = Controller()
self._callbacks = []
self._status_lock = False
self._last_cmd_time = time.time()
self._connected = False
self._moving = False
self._speed = 0
self._distance = 0
self._register_controller_callbacks()
def _register_controller_callbacks(self):
self._ctrl.handler_cur_status = self._on_status_update
def _begin_cmd(self) -> asyncio.Lock:
self._status_lock = True
return asyncio.Lock()
async def _end_cmd(self):
await asyncio.sleep(0.75)
self._last_cmd_time = time.time()
self._status_lock = False
def _on_status_update(self, sender, status: WalkingPadCurStatus) -> None:
"""Update current state."""
# Don't update if we're still running a command or just did (status from device is outdated at first)
if (
self._status_lock
or time.time() - self._last_cmd_time < STATUS_LOCK_ON_CMD_SECONDS
):
return
self._moving = status.speed > 0
self._speed = status.speed
self._distance = status.dist
if len(self._callbacks) > 0:
for callback in self._callbacks:
callback(status)
def register_status_callback(self, callback) -> None:
"""Register a status callback."""
self._callbacks.append(callback)
@property
def mac(self):
"""Mac address."""
return self._ble_device.address
@property
def name(self):
"""Name."""
return self._name
@property
def connected(self):
"""Connected status."""
return self._connected
@property
def moving(self):
"""Whether or not the device is currently moving."""
return self._moving
@property
def speed(self):
"""The current device speed."""
return self._speed
@property
def distance(self):
"""The current device distance."""
return self._distance
async def connect(self) -> None:
"""Connect the device."""
lock = self._begin_cmd()
async with lock:
await self._ctrl.run(self._ble_device)
self._connected = True
await self._end_cmd()
async def disconnect(self) -> None:
"""Disconnect the device."""
lock = self._begin_cmd()
async with lock:
await self._ctrl.disconnect()
self._connected = False
await self._end_cmd()
async def turn_on(self) -> None:
"""Turn on the device."""
lock = self._begin_cmd()
async with lock:
await self._ctrl.switch_mode(WalkingPad.MODE_MANUAL)
await self._end_cmd()
async def turn_off(self) -> None:
"""Turn off the device."""
lock = self._begin_cmd()
async with lock:
await self._ctrl.switch_mode(WalkingPad.MODE_STANDBY)
await self._end_cmd()
async def start_belt(self) -> None:
"""Start the belt."""
lock = self._begin_cmd()
async with lock:
await self._ctrl.start_belt()
self._moving = True
await self._end_cmd()
async def stop_belt(self) -> None:
"""Stop the belt."""
lock = self._begin_cmd()
async with lock:
await self._ctrl.stop_belt()
self._moving = False
await self._end_cmd()
async def change_speed(self, speed: int) -> None:
"""Change the speed."""
lock = self._begin_cmd()
async with lock:
await self._ctrl.change_speed(speed)
self._speed = speed
await self._end_cmd()
async def update_state(self) -> None:
"""Update device state."""
# Grab the lock so we don't run while another command is running
lock = self._begin_cmd()
async with lock:
# Disable status lock so our update triggers a refresh
self._status_lock = False
await self._ctrl.ask_stats()
# Skip callback so we don't reset debouncer
# Path: custom_components/king_smith/coordinator.py
from datetime import datetime
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from ph4_walkingpad.pad import WalkingPadCurStatus
from .const import DOMAIN
from .walking_pad import WalkingPadApi
import logging
import time
"""The Walking Pad Coordinator."""
_LOGGER = logging.getLogger(__name__)
NEVER_TIME = -86400.0
DEBOUNCE_SECONDS = 1.0
class WalkingPadCoordinator(DataUpdateCoordinator[None]):
"""Data coordinator for receiving Walking Pad updates."""
def __init__(self, hass: HomeAssistant, walking_pad_api: WalkingPadApi) -> None:
"""Initialise the coordinator."""
super().__init__(
hass,
_LOGGER,
| name=DOMAIN, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ndiamant/spice
# Path: spice/utils.py
class BaseLightning(LightningModule):
def _configure_optimizers(self, parameters: Iterator[torch.nn.Parameter]):
opt = optim.AdamW(
parameters, lr=self.hparams.lr, weight_decay=self.hparams.wd,
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(opt, T_max=self.hparams.max_iter)
return [opt], [{"scheduler": scheduler, "interval": "step"}]
def configure_optimizers(self):
return self._configure_optimizers(self.parameters())
def training_step(self, batch: list[torch.Tensor]) -> torch.Tensor:
return self.get_loss(batch, "train")
def validation_step(self, batch: list[torch.Tensor], *args) -> torch.Tensor:
return self.get_loss(batch, "val")
@abstractmethod
def get_loss(self, batch: list[torch.Tensor], prefix: str) -> torch.Tensor:
pass
def epoch_log(
self,
name: str,
value: torch.Tensor,
) -> None:
super().log(name, value, on_epoch=True, on_step=False)
# Path: spice/utils.py
class MLP(nn.Module):
def __init__(self, input_dim: int, hidden: int, n_hidden: int, output_dim: int = None):
super().__init__()
output_dim = output_dim or hidden
self.model = nn.Sequential(
nn.Sequential(nn.Linear(input_dim, hidden), nn.GELU()),
)
for _ in range(n_hidden):
self.model.append(
nn.Sequential(nn.Linear(hidden, hidden), nn.GELU()),
)
self.model.append(nn.Linear(hidden, output_dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
# Path: spice/utils.py
def unique_quantile(
x: torch.Tensor, n_bins: int, first_bin_zero: bool = True,
max_try_n_bins: int = None, verbose: bool = False,
) -> torch.Tensor:
"""binary search to find the right number of bins to yield n_bins unique quantiles"""
if len(x.unique()) == 1:
raise ValueError("Must have more than one value to find unique quantiles.")
def _print(x: Any):
if not verbose:
return
print(x)
min_n_bins = n_bins
max_try_n_bins = max_try_n_bins or 5 * n_bins
og_max_try = max_try_n_bins
unique_quantiles = None
while min_n_bins <= max_try_n_bins:
try_n_bins = (min_n_bins + max_try_n_bins) // 2
first_bin = (0 if first_bin_zero else 1) / try_n_bins
quantiles = torch.linspace(first_bin, 1, try_n_bins)
unique_quantiles = torch.unique(x.quantile(quantiles))
n_unique = unique_quantiles.shape[0]
_print(f"tried {try_n_bins=} and got {len(unique_quantiles)=} / {n_bins}")
if n_unique == n_bins:
_print("found correct number of bins")
return unique_quantiles
if n_unique > n_bins:
max_try_n_bins = try_n_bins - 1
else:
min_n_bins = try_n_bins + 1
if min_n_bins >= og_max_try:
_print(f"Trying again with 2x max try bins")
return unique_quantile(
x, n_bins, first_bin_zero, max_try_n_bins * 2, verbose=verbose,
)
_print(f"Algorithm failed, returning closest guess.")
# likely results in unused bins
if n_unique < n_bins:
start, stop = unique_quantiles[-2:]
lengthened = torch.cat([
unique_quantiles[:-2],
torch.linspace(start, stop, n_bins - n_unique + 2)
])
return lengthened
else:
deltas = unique_quantiles[1:] - unique_quantiles[:-1]
min_delta_idx = deltas.argsort()
idx_to_keep = [
i for i in list(range(n_unique))
if i not in min_delta_idx[:n_unique - n_bins]
]
shortened = unique_quantiles[idx_to_keep]
return shortened
# Path: spice/utils.py
def score_to_q_hat(score: torch.Tensor, alpha: float) -> float:
n = score.shape[0]
quantile = math.ceil((n + 1) * (1 - alpha)) / n
q_hat = score.quantile(quantile).item()
return q_hat
# Path: spice/utils.py
def compute_conformal_metrics(
x_test: torch.Tensor, y_test: torch.Tensor, sizes: torch.Tensor, covered: torch.Tensor,
) -> dict[str, float]:
x_test = x_test.cpu()
y_test = y_test.cpu().squeeze()
sizes = sizes.cpu().squeeze()
covered = covered.cpu().squeeze()
metrics = dict()
metrics["coverage"] = covered.float().mean().item()
metrics["size"] = sizes.mean().item()
metrics["wsc_coverage"] = wsc_unbiased(x_test.cpu().numpy(), covered.cpu().numpy())
# y stratified coverage
y_quantiles = unique_quantile(y_test, n_bins=5, first_bin_zero=False)
discrete_y = torch.bucketize(y_test, y_quantiles)
metrics["y_stratified_coverage"] = stratified_coverage(covered, discrete_y)
# size stratified coverage
try:
size_quantiles = unique_quantile(sizes / sizes.max(), n_bins=5, first_bin_zero=False)
discrete_size = torch.bucketize(sizes, size_quantiles)
metrics["size_stratified_coverage"] = stratified_coverage(covered, discrete_size)
except ValueError:
pass # no unique sizes case
return metrics
# Path: spice/conditional_histogram.py
import copy
import math
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from tqdm import tqdm
from torch import nn
from spice.utils import (
BaseLightning, MLP, unique_quantile,
score_to_q_hat, compute_conformal_metrics,
)
def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:
return unique_quantile(y, n_bins, first_bin_zero=False)
def discretize(y: torch.Tensor, bins: torch.Tensor) -> torch.Tensor:
return torch.bucketize(y.clip(max=bins[-1] - 1e-5), boundaries=bins)
| class ConditionalHist(BaseLightning): |
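The finite-sample correction in score_to_q_hat above is worth seeing with numbers; a worked toy example (values illustrative):
import math
import torch

scores = torch.arange(1, 101, dtype=torch.float)  # 100 calibration scores: 1, 2, ..., 100
alpha = 0.1
n = scores.shape[0]
quantile = math.ceil((n + 1) * (1 - alpha)) / n   # ceil(101 * 0.9) / 100 = 0.91
q_hat = scores.quantile(quantile).item()
print(quantile, q_hat)                            # 0.91 and ~91.1 (slightly above the plain 0.9 quantile)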
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nik-sm/com-hom-emg
# Path: com_hom_emg/data.py
def get_datasets(
per_subj_data: dict,
fold: int,
n_train_subj: int,
n_val_subj: int,
n_test_subj: int,
use_preprocessed_data: bool,
return_subj_names: bool = False, # For testing
) -> Tuple[TensorDataset, TensorDataset, TensorDataset]:
"""
Separate subjects; some for learning embedding, some for val and some for test.
(Unseen subjects for val and test)
Returns train, val, test datasets.
"""
assert fold in range(len(per_subj_data))
def collect_one(_subjs, subj_id_offset=0):
data, labels, is_single, subj_ids = [], [], [], []
for subj_id, subj in enumerate(_subjs):
# Add doubles
x = per_subj_data[subj]["data"]
y = per_subj_data[subj]["labels"]
data.append(x)
labels.append(y)
# NOTE - careful to use bool dtype; used for masking later
# Single gestures have 4 in one component or the other
is_sing = np.logical_or(y[:, 0] == 4, y[:, 1] == 4)
is_single.append(is_sing)
subj_ids.append((subj_id + subj_id_offset) * np.ones(len(x), dtype=int))
data = np.concatenate(data)
if use_preprocessed_data:
data = preprocess(data)
data = torch.from_numpy(data).float()
labels = torch.from_numpy(np.concatenate(labels))
is_single = torch.from_numpy(np.concatenate(is_single))
subj_ids = torch.from_numpy(np.concatenate(subj_ids))
data, labels, is_single, subj_ids = shuffle_together(data, labels, is_single, subj_ids)
return TensorDataset(data, labels, is_single, subj_ids)
subjs = np.roll(list(per_subj_data.keys()), -fold)
if n_train_subj + n_val_subj + n_test_subj != len(subjs):
raise ValueError(f"Num subjects in train/val/test splits must sum to {len(subjs)}")
test_subj = subjs[0:n_test_subj]
val_subj = subjs[n_test_subj : n_test_subj + n_val_subj]
train_subj = subjs[n_test_subj + n_val_subj :]
assert np.intersect1d(train_subj, val_subj).size == 0
assert np.intersect1d(train_subj, test_subj).size == 0
assert np.intersect1d(val_subj, test_subj).size == 0
train_set, val_set, test_set = collect_one(train_subj), collect_one(val_subj), collect_one(test_subj)
logger.info(f"Train subjects: {train_subj}")
logger.info(f"Val subjects: {val_subj}")
logger.info(f"Test subjects: {test_subj}")
logger.info(f"Train on {len(train_subj)} subjects:\n{[x.shape for x in train_set.tensors]}")
logger.info(f"Validate on {len(val_subj)} subjects:\n{[x.shape for x in val_set.tensors]}")
logger.info(f"Test on {len(test_subj)} subjects:\n{[x.shape for x in test_set.tensors]}")
if not return_subj_names:
return train_set, val_set, test_set
return train_set, val_set, test_set, train_subj, val_subj, test_subj
# Path: com_hom_emg/data.py
def get_per_subj_data():
path = PROJECT_PATH / "data" / "combination-gesture-dataset" / "python"
per_subj_data = {}
for subj_idx in range(10):
per_subj_data[subj_idx] = {
"data": np.load(path / f"subj{subj_idx}/data.npy"),
"labels": np.load(path / f"subj{subj_idx}/labels.npy"),
}
return per_subj_data
# Path: tests/test_data.py
import torch
from com_hom_emg.data import get_datasets, get_per_subj_data
def test_get_datasets_disjoint_val_test():
# The subject used for val should be different each time
# Likewise for test
per_subj_data = get_per_subj_data()
all_val_subj = []
all_test_subj = []
n_train = 8
n_val = 1
n_test = 1
expected_train_size = 8 * 1224 # 1224 gestures per subject
expected_val_size = n_val * 1224
expected_test_size = n_test * 1224
def check_contents(dataset, N):
## Check shapes
# data = 8-channel EMG, 962 timesteps (= 500ms at 1926 Hz)
assert dataset.tensors[0].shape == torch.Size([N, 8, 962])
# labels = 2D labels
assert dataset.tensors[1].shape == torch.Size([N, 2])
# is_single = bool labels
assert dataset.tensors[2].shape == torch.Size([N])
# subj_ids = 1d labels
assert dataset.tensors[3].shape == torch.Size([N])
## Check dtypes
assert dataset.tensors[0].dtype == torch.float32
assert dataset.tensors[1].dtype == torch.int64
assert dataset.tensors[2].dtype == torch.bool
assert dataset.tensors[3].dtype == torch.int64
for i in range(10):
| train_set, val_set, test_set, train_subj, val_subj, test_subj = get_datasets( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: alengwenus/ha-sma-ev-charger
# Path: custom_components/smaev/const.py
DOMAIN = "smaev"
# Path: custom_components/smaev/const.py
SMAEV_COORDINATOR = "coordinator"
# Path: custom_components/smaev/const.py
SMAEV_DEVICE_INFO = "device_info"
# Path: custom_components/smaev/const.py
SMAEV_PARAMETER = "parameter"
# Path: custom_components/smaev/const.py
SMAEV_POSSIBLE_VALUES = "possibleValues"
# Path: custom_components/smaev/const.py
SMAEV_VALUE = "value"
# Path: custom_components/smaev/select.py
from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING
from pysmaev.const import SmaEvChargerParameters
from pysmaev.helpers import get_parameters_channel
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
DOMAIN,
SMAEV_COORDINATOR,
SMAEV_DEVICE_INFO,
SMAEV_PARAMETER,
SMAEV_POSSIBLE_VALUES,
SMAEV_VALUE,
)
import logging
"""Select platform for SMA EV Charger integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
@dataclass
class SmaEvChargerSelectEntityDescription(SelectEntityDescription):
"""Describes SMA EV Charger select entities."""
type: str = ""
channel: str = ""
value_mapping: dict = field(default_factory=dict)
SELECT_DESCRIPTIONS: tuple[SmaEvChargerSelectEntityDescription, ...] = (
SmaEvChargerSelectEntityDescription(
key="operating_mode_of_charge_session",
translation_key="operating_mode_of_charge_session",
type=SMAEV_PARAMETER,
channel="Parameter.Chrg.ActChaMod",
value_mapping={
SmaEvChargerParameters.BOOST_CHARGING: "boost_charging",
SmaEvChargerParameters.OPTIMIZED_CHARGING: "optimized_charging",
SmaEvChargerParameters.SETPOINT_CHARGING: "setpoint_charging",
SmaEvChargerParameters.CHARGE_STOP: "charge_stop",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSelectEntityDescription(
key="led_brightness",
translation_key="led_brightness",
type=SMAEV_PARAMETER,
channel="Parameter.Sys.DevSigBri",
value_mapping={
SmaEvChargerParameters.LED_LOW: "low",
SmaEvChargerParameters.LED_AVERAGE: "average",
SmaEvChargerParameters.LED_HIGH: "high",
},
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA EV Charger select entities."""
data = hass.data[DOMAIN][config_entry.entry_id]
coordinator = data[SMAEV_COORDINATOR]
device_info = data[SMAEV_DEVICE_INFO]
if TYPE_CHECKING:
assert config_entry.unique_id
entities = []
for entity_description in SELECT_DESCRIPTIONS:
entities.append(
SmaEvChargerSelect(
coordinator, config_entry.unique_id, device_info, entity_description
)
)
async_add_entities(entities)
class SmaEvChargerSelect(CoordinatorEntity, SelectEntity):
"""Representation of a SMA EV Charger select entity."""
entity_description: SmaEvChargerSelectEntityDescription
_attr_has_entity_name = True
def __init__(
self,
coordinator: DataUpdateCoordinator,
config_entry_unique_id: str,
device_info: DeviceInfo,
entity_description: SmaEvChargerSelectEntityDescription,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self.entity_description = entity_description
self._attr_device_info = device_info
self._attr_unique_id = f"{config_entry_unique_id}-{self.entity_description.key}"
self._attr_options = []
self._attr_current_option = None
self.inv_value_mapping = {
value: key for key, value in self.entity_description.value_mapping.items()
}
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
channel = get_parameters_channel(
self.coordinator.data[SMAEV_PARAMETER],
self.entity_description.channel,
)
possible_values = channel[SMAEV_POSSIBLE_VALUES]
| value = channel[SMAEV_VALUE] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: microsoft/promptbase
# Path: azureml/components/src/shared/jsonl_file_utils.py
class JSONLReader:
"""Line-by-line iteration over a JSONL file
Can be used in a 'with' statement, and then iterated over.
The returned value is a decoded JSON object, rather than
the line itself
"""
def __init__(self, jsonl_file: pathlib.Path, encoding: str):
self._file_path = jsonl_file
self._encoding = encoding
self._jf = None
def __iter__(self):
return self
def __next__(self) -> dict[str, Any]:
nxt_line = next(self._jf)
result = json.loads(nxt_line)
return result
def __enter__(self):
self._jf = open(self._file_path, "r", encoding=self._encoding)
return self
def __exit__(self, *args):
self._jf.close()
# Path: azureml/components/src/shared/jsonl_file_utils.py
class JSONLWriter:
def __init__(self, jsonl_file: pathlib.Path | None, encoding: str | None):
self._file_path = jsonl_file
self._encoding = encoding
self._jf = None
def __enter__(self):
if self._file_path is not None:
self._jf = open(self._file_path, "w", encoding=self._encoding)
else:
_logger.info(f"No target path specified, writing to TemporaryFile")
self._jf = tempfile.TemporaryFile(mode="w", encoding="utf-8-sig")
return self
def __exit__(self, *args):
self._jf.close()
def write_line(self, target_object: dict[str, Any]):
nxt_line = json.dumps(target_object)
self._jf.write(nxt_line)
self._jf.write("\n")
# Path: azureml/components/src/shared/logging_utils.py
def get_standard_logger_for_file(
file_path: str, logging_level=logging.INFO
) -> logging.Logger:
_logger = logging.getLogger(pathlib.Path(file_path).name)
_logger.setLevel(logging_level)
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(
logging.Formatter(
"%(asctime)s - %(name)s [%(levelname)s] : %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
_logger.addHandler(sh)
return _logger
# Path: azureml/components/src/shared/jsonl_utils.py
import json
import pathlib
import tempfile
import traceback
from typing import Any, Callable, Tuple
from .jsonl_file_utils import JSONLReader, JSONLWriter
from .logging_utils import get_standard_logger_for_file
# Copied from Medprompt.... perhaps those utils should go to PyPi?
_logger = get_standard_logger_for_file(__file__)
def line_map(
*,
map_func: Callable[[dict[str, Any]], dict[str, Any] | None],
source_file: pathlib.Path,
dest_file: pathlib.Path,
source_encoding: str,
dest_encoding: str,
error_file: pathlib.Path | None = None,
error_encoding: str | None = None,
max_errors: int = -1,
) -> Tuple[int, int]:
"""Iterate over a JSONL file, applying map_func to each line"""
assert source_file.exists()
successful_lines = 0
error_lines = 0
| with JSONLReader(source_file, source_encoding) as in_file: |
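line_map is cut off at this point in the prompt; as a rough stand-alone sketch of the same line-by-line mapping idea (plain json, no error-file or max_errors handling, names and paths illustrative):
import json
import pathlib
from typing import Any, Callable

def line_map_simple(map_func: Callable[[dict[str, Any]], dict[str, Any] | None],
                    source_file: pathlib.Path, dest_file: pathlib.Path) -> int:
    written = 0
    with open(source_file, "r", encoding="utf-8") as src, \
         open(dest_file, "w", encoding="utf-8") as dst:
        for line in src:
            mapped = map_func(json.loads(line))
            if mapped is not None:  # None means "drop this line"
                dst.write(json.dumps(mapped) + "\n")
                written += 1
    return written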
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: openai/weak-to-strong
# Path: weak_to_strong/common.py
def clear_mem(verbose: bool = False):
"""
This function is used to clear the memory allocated by PyTorch.
It does so by calling the garbage collector to release unused GPU memory.
After clearing the memory, it prints the current amount of memory still allocated by PyTorch (post-clean).
Parameters:
verbose (bool): Whether to print additional information.
"""
gc.collect()
torch.cuda.empty_cache()
print(
f"torch.cuda.memory_allocated: {torch.cuda.memory_allocated(0) / 1024**3:.2f}GB"
)
if verbose:
def try_attr(x, a):
try:
return getattr(x, a)
except:
# amazing that this can cause...
# (AttributeError, OSError, AssertionError, RuntimeError, ModuleNotFoundError)
return None
for obj in gc.get_objects():
if torch.is_tensor(obj) or torch.is_tensor(try_attr(obj, "data")):
print(type(obj), obj.size(), obj.dtype)
# Path: weak_to_strong/eval.py
def eval_model_acc(model: nn.Module, ds: datasets.Dataset, eval_batch_size: int = 16) -> datasets.Dataset:
"""
This function evaluates the accuracy of a given model on a given dataset.
Parameters:
model (nn.Module): The model to be evaluated.
ds (datasets.Dataset): The dataset on which the model is to be evaluated.
Returns:
results (list): A list of dictionaries containing the input_ids, ground truth label, predicted label,
accuracy of prediction, logits and soft label for each example in the dataset.
"""
model.eval()
with torch.no_grad():
results = []
# for ex in ds:
for batch in to_batch(ds, eval_batch_size):
# pad input_ids to common length
input_ids = torch.nn.utils.rnn.pad_sequence(
[torch.tensor(ex) for ex in batch["input_ids"]], batch_first=True
).to(model.device if hasattr(model, "device") else "cpu")
labels = batch["soft_label"]
# run forward pass
raw_logits = model(input_ids)
probs = unpack(torch.nn.functional.softmax(raw_logits, dim=-1))
logits = unpack(raw_logits)
preds = np.argmax(probs, axis=-1)
labels = np.argmax(labels, axis=-1)
results.extend(
[
dict(
txt=txt,
input_ids=input_id,
gt_label=label,
hard_label=pred,
acc=label == pred,
logits=logit,
soft_label=prob,
)
for input_id, txt, label, pred, prob, logit in zip(
batch["input_ids"], batch["txt"], labels, preds, probs, logits
)
]
)
accs = [r["acc"] for r in results]
print("Accuracy:", np.mean(accs), "+/-", np.std(accs) / np.sqrt(len(accs)))
return datasets.Dataset.from_list(results)
# Path: weak_to_strong/loss.py
class xent_loss(LossFnBase):
def __call__(
self, logits: torch.Tensor, labels: torch.Tensor, step_frac: float
) -> torch.Tensor:
"""
This function calculates the cross entropy loss between logits and labels.
Parameters:
logits: The predicted values.
labels: The actual values.
step_frac: The fraction of total training steps completed.
Returns:
The mean of the cross entropy loss.
"""
loss = torch.nn.functional.cross_entropy(logits, labels)
return loss.mean()
# Path: weak_to_strong/model.py
class TransformerWithHead(PreTrainedModel):
"""
This class initializes the linear head to zeros
"""
def __init__(self, name, linear_probe=False, **kwargs):
config = AutoConfig.from_pretrained(name, **kwargs)
super().__init__(config)
self.num_labels = config.num_labels
lm = AutoModelForCausalLM.from_pretrained(name, **kwargs)
self.lm = lm
self.transformer = lm.transformer
hidden_size = getattr(config, "n_embd", getattr(config, "hidden_size", None))
self.score = torch.nn.Linear(hidden_size, self.num_labels, bias=False).to(
lm.lm_head.weight.dtype
)
torch.nn.init.normal_(self.score.weight, std=0.0)
self.linear_probe = linear_probe
@classmethod
def from_pretrained(cls, name, **kwargs):
return cls(name, **kwargs)
def gradient_checkpointing_enable(self):
model = self.transformer
(
model if hasattr(model, "save_pretrained") else model.module
).gradient_checkpointing_enable()
def forward(self, input_ids: torch.LongTensor):
"""
Forward pass of the model with a linear head.
Parameters:
input_ids (torch.LongTensor): Input tensor containing the token ids.
Returns:
HeadOutput: Output dataclass containing the logits.
"""
input_lens = (input_ids != 0).sum(dim=-1)
transformer_outputs = self.transformer(input_ids)
hidden_states = torch.stack(
[transformer_outputs[0][i, input_lens[i] - 1, :] for i in range(len(input_lens))]
)
self.score.to(hidden_states.device)
if self.linear_probe:
hidden_states = hidden_states.detach()
logits = self.score(hidden_states)
return logits
# Path: weak_to_strong/train.py
import itertools
import os
import pickle
import time
import datasets
import numpy as np
import torch
import torch_optimizer as toptim
import weak_to_strong.logger as logger
from dataclasses import dataclass
from typing import Callable, Optional
from transformers.modeling_utils import load_sharded_checkpoint
from weak_to_strong.common import clear_mem
from weak_to_strong.eval import eval_model_acc
from weak_to_strong.loss import xent_loss
from weak_to_strong.model import TransformerWithHead
@dataclass
class ModelConfig:
name: str
default_lr: float
eval_batch_size: int
custom_kwargs: Optional[dict] = None
gradient_checkpointing: bool = False
model_parallel: bool = False
default_optimizer: str = "adam"
def train_model(
model: torch.nn.Module,
ds: datasets.Dataset,
batch_size: int,
lr: float = 1e-5,
| loss_fn: Callable = xent_loss, |
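One detail of TransformerWithHead.forward above that is easy to miss is the pooling: the linear head only sees the hidden state of the last non-pad token of each sequence (pad id 0 in that code). A toy illustration with made-up tensors:
import torch

hidden_states = torch.arange(2 * 4 * 3, dtype=torch.float).reshape(2, 4, 3)  # (batch, seq, dim)
input_ids = torch.tensor([[5, 6, 0, 0],   # sequence of length 2
                          [7, 8, 9, 0]])  # sequence of length 3
input_lens = (input_ids != 0).sum(dim=-1)  # tensor([2, 3])
pooled = torch.stack([hidden_states[i, input_lens[i] - 1, :] for i in range(len(input_lens))])
print(pooled.shape)  # torch.Size([2, 3]): one vector per sequence, taken at positions 1 and 2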
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SqueezeAILab/LLMCompiler
# Path: configs/hotpotqa/gpt_prompts.py
OUTPUT_PROMPT = (
"Solve a question answering task with interleaving Observation, Thought, and Action steps. Here are some guidelines:\n"
" - You will be given a Question and some Wikipedia passages, which are the Observations.\n"
" - Thought needs to reason about the question based on the Observations in 1-2 sentences.\n"
" - There are cases where the Observations are unclear or irrelevant (in the case wikipedia search was not successful). In such a case where the Observations are unclear, you must make a best guess based on your own knowledge if you don't know the answer. You MUST NEVER say in your thought that you don't know the answer.\n\n"
"Action can be only one type:\n"
f" (1) {JOINNER_FINISH}(answer): returns the answer and finishes the task. "
"Answer should be short and a single item and MUST not be multiple choices. Answer MUST NEVER be 'unclear', 'unknown', 'neither', 'unrelated' or 'undetermined', and otherwise you will be PENALIZED.\n"
"\n"
"Here are some examples:\n"
"\n"
"Question: Which magazine was started first Arthur's Magazine or First for Women?\n"
"\n"
"search(Arthur's Magazine)\n"
"Observation: Arthur's Magazine (1844-1846) was an American literary periodical published in Philadelphia in the 19th century.\n"
"search(First for Women (magazine))\n"
"Observation: First for Women is a woman's magazine published by Bauer Media Group in the USA.[1] The magazine was started in 1989.\n"
"Thought: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First for Women), so Arthur's Magazine was started first.\n"
f"Action: {JOINNER_FINISH}(Arthur's Magazine)\n"
"###\n"
"\n"
"Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?\n"
"search(Pavel Urysohn)\n"
"Observation: Pavel Samuilovich Urysohn (February 3, 1898 - August 17, 1924) was a Soviet mathematician who is best known for his contributions in dimension theory.\n"
"search(Leonid Levin)\n"
"Observation: Leonid Anatolievich Levin is a Soviet-American mathematician and computer scientist.\n"
"Thought: Pavel Urysohn is a mathematician. Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn and Leonid Levin have the same type of work.\n"
f"Action: {JOINNER_FINISH}(yes)\n"
"###\n"
"\n"
)
# Path: configs/hotpotqa/gpt_prompts.py
PLANNER_PROMPT = (
"Question: Which magazine was started first Arthur's Magazine or First for Women?\n"
'1. search("Arthur\'s Magazine")\n'
'2. search("First for Women (magazine)")\n'
"Thought: I can answer the question now.\n"
f"3. join(){END_OF_PLAN}\n"
"###\n"
"\n"
"Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?\n"
'1. search("Pavel Urysohn")\n'
'2. search("Leonid Levin")\n'
"Thought: I can answer the question now.\n"
f"3. join(){END_OF_PLAN}\n"
"###\n"
"\n"
)
# Path: configs/hotpotqa/configs.py
from configs.hotpotqa.gpt_prompts import OUTPUT_PROMPT, PLANNER_PROMPT
CONFIGS = {
"default_model": "gpt-3.5-turbo-1106",
"planner_prompt": PLANNER_PROMPT,
| "output_prompt": OUTPUT_PROMPT, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: open-compass/MixtralKit
# Path: mixtralkit/layers/utils.py
class ModelArgs:
dim: int = 4096
n_layers: int = 32
n_heads: int = 32
n_kv_heads: Optional[int] = None
vocab_size: int = -1 # defined later by tokenizer
multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
ffn_dim_multiplier: Optional[float] = None
norm_eps: float = 1e-5
max_batch_size: int = 32
max_seq_len: int = 2048
# Path: mixtralkit/layers/utils.py
def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
"""torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
bs, slen, n_kv_heads, head_dim = x.shape
if n_rep == 1:
return x
return (
x[:, :, :, None, :]
.expand(bs, slen, n_kv_heads, n_rep, head_dim)
.reshape(bs, slen, n_kv_heads * n_rep, head_dim)
)
# Path: mixtralkit/layers/position_embeding.py
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Apply rotary embeddings to input tensors using the given frequency tensor.
This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided
frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor
is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are
returned as real tensors.
Args:
xq (torch.Tensor): Query tensor to apply rotary embeddings.
xk (torch.Tensor): Key tensor to apply rotary embeddings.
freqs_cis (torch.Tensor): Precomputed frequency tensor for complex exponentials.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.
"""
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
return xq_out.type_as(xq), xk_out.type_as(xk)
# Path: mixtralkit/layers/attention.py
import math
import torch
import torch.nn.functional as F
import fairscale.nn.model_parallel.initialize as fs_init
from typing import Optional, Tuple
from torch import nn
from .utils import ModelArgs, repeat_kv
from .position_embeding import apply_rotary_emb
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
)
# Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (ColumnParallelLinear): Linear transformation for queries.
wk (ColumnParallelLinear): Linear transformation for keys.
wv (ColumnParallelLinear): Linear transformation for values.
wo (RowParallelLinear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
| xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis) |
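The repeat_kv helper shown above is the piece that lets a small number of key/value heads serve a larger number of query heads (grouped-query attention). A toy shape check, illustrative only:
import torch

def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    # Same logic as the helper above: expand each kv head n_rep times along the head axis.
    bs, slen, n_kv_heads, head_dim = x.shape
    if n_rep == 1:
        return x
    return (
        x[:, :, :, None, :]
        .expand(bs, slen, n_kv_heads, n_rep, head_dim)
        .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
    )

keys = torch.randn(2, 10, 8, 128)        # (batch, seq, n_kv_heads=8, head_dim)
print(repeat_kv(keys, n_rep=4).shape)    # torch.Size([2, 10, 32, 128]) -> matches 32 query heads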
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aymenfurter/microagents
# Path: agents/microagent_manager.py
class MicroAgentManager:
"""
Manages the creation and retrieval of micro agents.
"""
def __init__(self, api_key: str, max_agents: int = 20, db_filename="agents.db"):
self.api_key = api_key
self.max_agents = max_agents
self.openai_wrapper = OpenAIAPIWrapper(api_key)
self.agent_persistence = AgentPersistenceManager(db_filename)
self.agent_lifecycle = AgentLifecycle(self.openai_wrapper, self.agent_persistence, max_agents)
self.load_agents()
def cleanup_agents(self):
"""Remove all agents with status stopped = True"""
self.agent_lifecycle.cleanup_agents()
def load_agents(self):
"""Loads agents from the database."""
loaded_agents = self.agent_persistence.load_all_agents(self.agent_lifecycle, self.openai_wrapper)
self.agent_lifecycle.agents.extend(loaded_agents)
logger.info(f"Loaded {len(loaded_agents)} agents from the database.")
def get_agents(self) -> List[Any]:
"""Returns the list of agents."""
self.cleanup_agents()
return self.agent_lifecycle.agents
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
def display_agent_status(self):
"""Displays the current status of all agents."""
for agent in self.get_agents():
logger.info(f"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}")
def display_active_agent_tree(self):
"""Displays a tree view of active agent relationships."""
for agent in self.get_agents():
if agent.active_agents:
logger.info(f"Agent {agent.purpose} is calling: {agent.active_agents}")
else:
logger.info(f"Agent {agent.purpose} is currently idle.")
# Path: agents/microagent.py
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None):
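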
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stopped:
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
# Path: gradio_ui/agent_manager.py
import logging
from typing import Any, List
from agents.microagent_manager import MicroAgentManager
from agents.microagent import MicroAgent
logger = logging.getLogger(__name__)
class GradioAgentManager:
"""
A wrapper class for interacting with MicroAgentManager in a Gradio interface.
"""
def __init__(self, api_key: str):
self.manager = MicroAgentManager(api_key)
self.manager.create_agents()
def get_agents_info(self) -> List[dict]:
"""
Retrieve information about all agents for display in Gradio.
"""
agents = self.manager.get_agents()
return [self.format_agent_info(agent) for agent in agents]
| def format_agent_info(self, agent: MicroAgent) -> dict: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bytedance/ImageDream
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
class bottleneck_IR(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth),
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
class bottleneck_IR_SE(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth),
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
SEModule(depth, 16),
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
def get_blocks(num_layers):
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3),
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3),
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3),
]
else:
raise ValueError(
"Invalid number of layers: {}. Must be one of [50, 100, 152]".format(
num_layers
)
)
return blocks
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
# Path: extern/ldm_zero123/thirdp/psp/model_irse.py
from torch.nn import (
BatchNorm1d,
BatchNorm2d,
Conv2d,
Dropout,
Linear,
Module,
PReLU,
Sequential,
)
from extern.ldm_zero123.thirdp.psp.helpers import (
Flatten,
bottleneck_IR,
bottleneck_IR_SE,
get_blocks,
l2_norm,
)
# https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir":
| unit_module = bottleneck_IR |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TencentARC/MotionCtrl
# Path: lvdm/common.py
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
try:
return ckpt(func, *inputs)
except:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
# Path: lvdm/common.py
def exists(val):
return val is not None
# Path: lvdm/common.py
def uniq(arr):
return {el: True for el in arr}.keys()
# Path: lvdm/common.py
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
# Path: lvdm/common.py
def max_neg_value(t):
return -torch.finfo(t.dtype).max
# Path: lvdm/common.py
def init_(tensor):
dim = tensor.shape[-1]
std = 1 / math.sqrt(dim)
tensor.uniform_(-std, std)
return tensor
# Path: lvdm/basics.py
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
# Path: lvdm/basics.py
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
# Path: lvdm/basics.py
def normalization(channels, num_groups=32):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNormSpecific(num_groups, channels)
# Path: lvdm/modules/attention_temporal.py
import math
import torch
import torch as th
import torch.nn.functional as F
from inspect import isfunction
from torch import nn, einsum
from einops import rearrange, repeat
from lvdm.common import (
checkpoint,
exists,
uniq,
default,
max_neg_value,
init_
)
from lvdm.basics import (
conv_nd,
zero_module,
normalization
)
try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
| dim_out = default(dim_out, dim) |
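For reference, the GEGLU block defined above projects to twice the output width, splits the result into a value half and a gate half, and multiplies the value by GELU of the gate. A small shape check, illustrative only:
import torch
import torch.nn as nn
import torch.nn.functional as F

class GEGLU(nn.Module):
    # Same idea as the module above: one projection, chunked into value and gate.
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)
    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)

tokens = torch.randn(2, 16, 320)        # (batch, tokens, dim)
print(GEGLU(320, 1280)(tokens).shape)   # torch.Size([2, 16, 1280])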
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: s-casci/tinyzero
# Path: models.py
class LinearNetwork(nn.Module):
def __init__(self, input_shape, action_space, first_layer_size=512, second_layer_size=256):
super().__init__()
self.first_layer = nn.Linear(input_shape[0], first_layer_size)
self.second_layer = nn.Linear(first_layer_size, second_layer_size)
self.value_head = nn.Linear(second_layer_size, 1)
self.policy_head = nn.Linear(second_layer_size, action_space)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.to(self.device)
def __call__(self, observations):
self.train()
x = F.relu(self.first_layer(observations))
x = F.relu(self.second_layer(x))
value = F.tanh(self.value_head(x))
log_policy = F.log_softmax(self.policy_head(x), dim=-1)
return value, log_policy
def value_forward(self, observation):
self.eval()
with torch.no_grad():
x = F.relu(self.first_layer(observation))
x = F.relu(self.second_layer(x))
value = F.tanh(self.value_head(x))
return value
def policy_forward(self, observation):
self.eval()
with torch.no_grad():
x = F.relu(self.first_layer(observation))
x = F.relu(self.second_layer(x))
log_policy = F.softmax(self.policy_head(x), dim=-1)
return log_policy
# Path: agents.py
class AlphaZeroAgent:
def __init__(self, model):
self.model = model
def value_fn(self, game):
observation = torch.tensor(game.to_observation(), device=self.model.device, requires_grad=False)
value = self.model.value_forward(observation)
return value.item()
def policy_fn(self, game):
observation = torch.tensor(game.to_observation(), device=self.model.device, requires_grad=False)
policy = self.model.policy_forward(observation)
return policy.cpu().numpy()
# Path: agents.py
class ClassicMCTSAgent:
@staticmethod
def value_fn(game):
game = copy.deepcopy(game)
while (first_person_result := game.get_first_person_result()) is None:
game.step(np.random.choice(game.get_legal_actions()))
return first_person_result
@staticmethod
def policy_fn(game):
return np.ones(game.action_space) / game.action_space
# Path: mcts.py
def pit(game, agent1, agent2, agent1_play_kwargs, agent2_play_kwargs):
current_agent, other_agent = agent1, agent2
current_agent_play_kwargs, other_agent_play_kwargs = agent1_play_kwargs, agent2_play_kwargs
while (result := game.get_result()) is None:
action = play(game, current_agent, **current_agent_play_kwargs)
game.step(action)
current_agent, other_agent = other_agent, current_agent
current_agent_play_kwargs, other_agent_play_kwargs = other_agent_play_kwargs, current_agent_play_kwargs
return result
# Path: tictactoe/one_dim/eval.py
from game import TicTacToe
from train import OUT_DIR, SEARCH_ITERATIONS
from tqdm import tqdm
from models import LinearNetwork # noqa: E402
from agents import AlphaZeroAgent, ClassicMCTSAgent # noqa: E402
from mcts import pit # noqa: E402
import torch
import os
import sys
sys.path.append(os.getcwd())
EVAL_GAMES = 100
if __name__ == "__main__":
game = TicTacToe()
model = LinearNetwork(game.observation_shape, game.action_space)
model.load_state_dict(torch.load(f"{OUT_DIR}/model.pth"))
agent = AlphaZeroAgent(model)
agent_play_kwargs = {"search_iterations": SEARCH_ITERATIONS * 2, "c_puct": 1.0, "dirichlet_alpha": None}
print(f"Playing {EVAL_GAMES} games against itself")
results = {0: 0, 1: 0, -1: 0}
for _ in tqdm(range(EVAL_GAMES)):
game.reset()
| result = pit( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: facebookresearch/PurpleLlama
# Path: CybersecurityBenchmarks/insecure_code_detector/languages.py
class Language(str, enum.Enum):
C = "c"
CPP = "cpp"
CSHARP = "csharp"
HACK = "hack"
JAVA = "java"
JAVASCRIPT = "javascript"
KOTLIN = "kotlin"
OBJECTIVE_C = "objective_c"
OBJECTIVE_CPP = "objective_cpp"
PHP = "php"
PYTHON = "python"
RUBY = "ruby"
RUST = "rust"
SWIFT = "swift"
XML = "xml"
def __str__(self) -> str:
return self.name.lower()
# Path: CybersecurityBenchmarks/insecure_code_detector/tests/insecure_code_detector_test.py
class InsecureCodeDetectorTest(unittest.IsolatedAsyncioTestCase, ABC):
@unittest.skipUnless(oss.ENABLE_REGEX_ANALYZER, "Regex analyzer not enabled")
async def run_regex_tests(
self, language: Language, tests: list[tuple[str, int]]
) -> None:
for test_case in tests:
test_input, expected_count = test_case
result = await insecure_code_detector.analyze(language, test_input)
print(test_case)
print(result)
self.assertEqual(len(result), expected_count)
@unittest.skipUnless(oss.ENABLE_WEGGLI, "Weggli analyzer not enabled")
async def run_weggli_tests(
self, language: Language, tests: list[tuple[str, int]]
) -> None:
for test_case in tests:
test_input, expected_count = test_case
result = await insecure_code_detector.analyze(
language, test_input, "void foo(){\n", "\n}"
)
self.assertEqual(
len(result), expected_count, "Test failed: {}".format(test_input)
)
@unittest.skipUnless(oss.ENABLE_SEMGREP, "Semgrep analyzer not enabled")
async def run_semgrep_tests(
self, language: Language, tests: list[tuple[str, int]]
) -> None:
for test_case in tests:
test_input, expected_count = test_case
result = await insecure_code_detector.analyze(language, test_input)
self.assertEqual(
len(result), expected_count, "Test failed: {}".format(test_input)
)
# Path: CybersecurityBenchmarks/insecure_code_detector/tests/test_python_insecure_code_detector.py
from ..languages import Language
from .insecure_code_detector_test import InsecureCodeDetectorTest
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
PYTHON_TEST_CASES = [
("import os", 0),
("import hashlib", 0),
("from hashlib import sha256", 0),
("hashlib.new('sha1')", 1),
("hashlib.md5()", 1),
]
| class TestPythonInsecureCodeDetector(InsecureCodeDetectorTest): |
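The PYTHON_TEST_CASES above expect weak-hash constructions such as hashlib.md5() and hashlib.new('sha1') to each produce one finding. As a rough illustration of why such strings are flaggable by a regex analyzer, here is a toy pattern; it is an assumption made for illustration, not the insecure_code_detector's actual rule set.
import re

# Toy weak-hash pattern, illustrative only (not the detector's real rules).
WEAK_HASH = re.compile(r"hashlib\.(md5|sha1)\(|hashlib\.new\(\s*['\"](md5|sha1)['\"]")

for snippet, expected in [("import hashlib", 0), ("hashlib.new('sha1')", 1), ("hashlib.md5()", 1)]:
    assert len(WEAK_HASH.findall(snippet)) == expected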
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: allenai/unified-io-2
# Path: t5x/examples/unified_io/audio_encoder.py
class AudioEncoder(nn.Module):
"""Encodes raw audio spectrograms as features"""
config: Union[ImageVitFeatureConfig, AudioVitFeatureConfig]
def setup(self):
cfg = self.config
# `vision_transformer` is a misnomer, but we leave it unchanged to keep checkpoint compatibility
self.vision_transformer = AudioTransformer(config=cfg)
@nn.compact
def __call__(self, x, mask, pos_ids, *, enable_dropout=True, patch_num=(16, 8)):
cfg = self.config
if cfg.transpose_input:
pos_ids = transpose_input(pos_ids, cfg.default_input_size, cfg.patch_size)
x, x1 = self.vision_transformer(x, mask, pos_ids, enable_dropout=enable_dropout)
return x, x1
# Path: t5x/examples/unified_io/image_encoder.py
class ImageEncoder(nn.Module):
"""Builds features from an image"""
config: Union[ImageVitFeatureConfig, AudioVitFeatureConfig]
def setup(self):
cfg = self.config
self.vision_transformer = VisionTransformer(config=cfg, param_dict=None)
@nn.compact
def __call__(self, x, mask, pos_ids, *, enable_dropout: bool = True, patch_num: Any = (16, 16)):
x, x1 = self.vision_transformer(x, mask, pos_ids, enable_dropout=enable_dropout, patch_num=patch_num)
return x, x1
# Path: t5x/examples/unified_io/modality_processing.py
from collections import OrderedDict
from typing import Mapping
from flax import traverse_util
from seqio import TaskRegistry, FeatureConverter
from t5x.examples.unified_io.audio_encoder import AudioEncoder
from t5x.examples.unified_io.image_encoder import ImageEncoder
from t5x.examples.unified_io.input_modalities import *
from t5x.examples.unified_io.target_modalities import *
"""Code for handling modalities"""
@gin.configurable
def get_target_modalities(
target_modality=['text', 'image', 'audio'],
image_vae_config: ImageViTVQGANConfig=VAEConfig(),
audio_vae_config: AudioViTVQGANConfig=AudioViTVQGANConfig(),
) -> Dict[str, ModalityEncoder]:
"""Return the encoders to use for target modalities"""
out = {}
if 'text' in target_modality:
out['text'] = TargetTextEncoder()
if 'image' in target_modality:
out['image'] = TargetImageDVAEEmbedder(image_vae_config)
if 'audio' in target_modality:
out['audio'] = TargetAudioDVAEEmbedder(audio_vae_config)
return out
@gin.configurable
def get_input_modalities(
input_modality=('text', 'image', 'image_history', 'audio', 'audio_history'),
image_vit_cfg: ImageVitFeatureConfig=ImageVitFeatureConfig(),
audio_vit_cfg: AudioVitFeatureConfig=AudioVitFeatureConfig(),
image_history_cfg: ImageResamplerConfig=ImageResamplerConfig(),
audio_history_cfg: AudioResamplerConfig=AudioResamplerConfig(),
max_img_history=None,
max_audio_history=None,
use_image_vit = False,
use_audio_vit = False,
freeze_vit=False,
use_image_history_vit = False,
use_audio_history_vit = False,
) -> Dict[str, ModalityEncoder]:
"""Returns the ModalityEncoder for the input modalities"""
out = dict()
if 'text' in input_modality:
out["text"] = InputTextEncoder()
image_encoder = None
if use_image_vit or use_image_history_vit:
image_encoder = ImageEncoder(image_vit_cfg)
audio_encoder = None
if use_audio_vit or use_audio_history_vit:
| audio_encoder = AudioEncoder(audio_vit_cfg) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zju3dv/EasyVolcap
# Path: easyvolcap/utils/data_utils.py
def load_pts(filename: str):
from pyntcloud import PyntCloud
cloud = PyntCloud.from_file(filename)
verts = cloud.xyz
if 'red' in cloud.points and 'green' in cloud.points and 'blue' in cloud.points:
r = np.asarray(cloud.points['red'])
g = np.asarray(cloud.points['green'])
b = np.asarray(cloud.points['blue'])
colors = np.stack([r, g, b], axis=-1) / 255
elif 'r' in cloud.points and 'g' in cloud.points and 'b' in cloud.points:
r = np.asarray(cloud.points['r'])
g = np.asarray(cloud.points['g'])
b = np.asarray(cloud.points['b'])
colors = np.stack([r, g, b], axis=-1) / 255
else:
colors = None
if 'nx' in cloud.points and 'ny' in cloud.points and 'nz' in cloud.points:
nx = np.asarray(cloud.points['nx'])
ny = np.asarray(cloud.points['ny'])
nz = np.asarray(cloud.points['nz'])
norms = np.stack([nx, ny, nz], axis=-1)
else:
norms = None
if 'alpha' in cloud.points:
cloud.points['alpha'] = cloud.points['alpha'] / 255
reserved = ['x', 'y', 'z', 'red', 'green', 'blue', 'r', 'g', 'b', 'nx', 'ny', 'nz']
scalars = dotdict({k: np.asarray(cloud.points[k])[..., None] for k in cloud.points if k not in reserved}) # one extra dimension at the back added
return verts, colors, norms, scalars
# Path: easyvolcap/utils/data_utils.py
def export_pts(pts: torch.Tensor, color: torch.Tensor = None, normal: torch.Tensor = None, scalars: dotdict = dotdict(), filename: str = "default.ply"):
from pandas import DataFrame
from pyntcloud import PyntCloud
data = dotdict()
pts = to_numpy(pts) # always blocking?
pts = pts.reshape(-1, 3)
data.x = pts[:, 0].astype(np.float32)
data.y = pts[:, 1].astype(np.float32)
data.z = pts[:, 2].astype(np.float32)
if color is not None:
color = to_numpy(color)
color = color.reshape(-1, 3)
data.red = (color[:, 0] * 255).astype(np.uint8)
data.green = (color[:, 1] * 255).astype(np.uint8)
data.blue = (color[:, 2] * 255).astype(np.uint8)
else:
data.red = (pts[:, 0] * 255).astype(np.uint8)
data.green = (pts[:, 1] * 255).astype(np.uint8)
data.blue = (pts[:, 2] * 255).astype(np.uint8)
if 'alpha' in scalars:
data.alpha = (scalars.alpha * 255).astype(np.uint8)
if normal is not None:
normal = to_numpy(normal)
normal = normal / (np.linalg.norm(normal, axis=-1, keepdims=True) + 1e-13)
normal = normal.reshape(-1, 3)
data.nx = normal[:, 0].astype(np.float32)
data.ny = normal[:, 1].astype(np.float32)
data.nz = normal[:, 2].astype(np.float32)
if scalars is not None:
scalars = to_numpy(scalars)
for k, v in scalars.items():
v = v.reshape(-1, 1)
data[k] = v[:, 0]
df = DataFrame(data)
cloud = PyntCloud(df) # construct the data
dirname = os.path.dirname(filename)
if dirname: os.makedirs(dirname, exist_ok=True)
return cloud.to_file(filename)
# Path: scripts/gaussian/merge_pcd.py
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.data_utils import load_pts, export_pts
from os.path import join
import argparse
import numpy as np
"""
This script loads .ply visual hulls from multiple directories and merges them into a single point cloud
"""
@catch_throw
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--data_root', default='data/enerf_outdoor/actor2_3')
parser.add_argument('--vhulls_dir', default='merged')
parser.add_argument('--vhulls_dirs', default=['vhulls', 'bkgd/boost'])
parser.add_argument('--pcd_file', default='000000.ply')
args = parser.parse_args()
vs = []
out = join(args.data_root, args.vhulls_dir, args.pcd_file)
for vhull_dir in args.vhulls_dirs:
vhull = join(args.data_root, vhull_dir, args.pcd_file)
| v, c, n, s = load_pts(vhull) |
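load_pts above returns vertices, colors, normals and extra scalars for each visual hull. A plausible continuation of main() is to stack the per-hull arrays and hand the result to export_pts; the snippet below is only a sketch of that merging step under that assumption, not necessarily the repository's exact code.
import numpy as np

# Illustrative merging step (assumed continuation): concatenate per-hull arrays.
def merge_clouds(clouds):
    verts = np.concatenate([v for v, c in clouds], axis=0)
    colors = [c for v, c in clouds if c is not None]
    colors = np.concatenate(colors, axis=0) if len(colors) == len(clouds) else None
    return verts, colors

a = (np.random.rand(100, 3), np.random.rand(100, 3))
b = (np.random.rand(50, 3), np.random.rand(50, 3))
verts, colors = merge_clouds([a, b])
print(verts.shape, colors.shape)   # (150, 3) (150, 3)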
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: minghanqin/LangSplat
# Path: utils/graphics_utils.py
def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
Rt = np.zeros((4, 4))
Rt[:3, :3] = R.transpose()
Rt[:3, 3] = t
Rt[3, 3] = 1.0
C2W = np.linalg.inv(Rt)
cam_center = C2W[:3, 3]
cam_center = (cam_center + translate) * scale
C2W[:3, 3] = cam_center
Rt = np.linalg.inv(C2W)
return np.float32(Rt)
# Path: utils/graphics_utils.py
def getProjectionMatrix(znear, zfar, fovX, fovY):
tanHalfFovY = math.tan((fovY / 2))
tanHalfFovX = math.tan((fovX / 2))
top = tanHalfFovY * znear
bottom = -top
right = tanHalfFovX * znear
left = -right
P = torch.zeros(4, 4)
z_sign = 1.0
P[0, 0] = 2.0 * znear / (right - left)
P[1, 1] = 2.0 * znear / (top - bottom)
P[0, 2] = (right + left) / (right - left)
P[1, 2] = (top + bottom) / (top - bottom)
P[3, 2] = z_sign
P[2, 2] = z_sign * zfar / (zfar - znear)
P[2, 3] = -(zfar * znear) / (zfar - znear)
return P
# Path: scene/cameras.py
import os
import pickle
import torch
import numpy as np
from torch import nn
from utils.graphics_utils import getWorld2View2, getProjectionMatrix
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class Camera(nn.Module):
def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
image_name, uid,
trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
):
super(Camera, self).__init__()
self.uid = uid
self.colmap_id = colmap_id
self.R = R
self.T = T
self.FoVx = FoVx
self.FoVy = FoVy
self.image_name = image_name
try:
self.data_device = torch.device(data_device)
except Exception as e:
print(e)
print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
self.data_device = torch.device("cuda")
self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
self.image_width = self.original_image.shape[2]
self.image_height = self.original_image.shape[1]
if gt_alpha_mask is not None:
self.original_image *= gt_alpha_mask.to(self.data_device)
else:
self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
self.zfar = 100.0
self.znear = 0.01
self.trans = trans
self.scale = scale
self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
| self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda() |
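The two transforms built here (world-to-view from getWorld2View2 and the perspective matrix from getProjectionMatrix) are typically composed into a single full projection downstream. A minimal sketch, assuming the two helpers shown earlier in this example are importable from utils.graphics_utils:
import math
import numpy as np
import torch
from utils.graphics_utils import getWorld2View2, getProjectionMatrix   # assumed importable

R, T = np.eye(3), np.zeros(3)   # identity pose, for illustration
world_view = torch.tensor(getWorld2View2(R, T)).transpose(0, 1)
proj = getProjectionMatrix(znear=0.01, zfar=100.0, fovX=math.radians(60), fovY=math.radians(45)).transpose(0, 1)
full_proj = world_view @ proj   # transposed matrices compose left-to-right
print(full_proj.shape)          # torch.Size([4, 4])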
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SciPhi-AI/agent-search
# Path: agent_search/core/search_types.py
class AgentSearchResult(BaseModel):
"""A dataclass to store the search result"""
score: float
url: str
title: Optional[str]
dataset: Optional[str]
# TODO - Add dict(str, [str, float, ..]) validation
metadata: Any
text: str
def __init__(self, **data: Any):
super().__init__(**data)
if self.title and self.title == self.text[0 : len(self.title)]:
self.text = self.text[len(self.title) :]
self.text = self.text.strip()
def to_string_dict(self) -> dict:
"""Returns a dictionary representation with all values as strings."""
return {
"score": str(self.score),
"url": self.url,
"title": self.title,
"dataset": self.dataset,
"metadata": self.metadata,
"text": self.text,
}
@classmethod
def from_dict(cls, data: dict):
return cls(**data)
# Path: agent_search/core/utils.py
def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
"""Compute the cosine similarity between two vectors."""
dot_product = np.dot(v1, v2)
norm_v1 = np.linalg.norm(v1)
norm_v2 = np.linalg.norm(v2)
return dot_product / (norm_v1 * norm_v2)
# Path: agent_search/core/utils.py
def get_data_path() -> str:
return os.path.join(
os.path.dirname(__file__),
"..",
"..",
"data",
)
# Path: agent_search/core/utils.py
def load_config(config_dir: Optional[str] = None) -> configparser.ConfigParser:
"""Load the configuration file."""
config = configparser.ConfigParser()
if not config_dir:
config_dir = get_data_path()
config.read(os.path.join(config_dir, "config.ini"))
return config
# Path: agent_search/search/base.py
import csv
import json
import logging
import os
import numpy as np
import psycopg2
from typing import List
from qdrant_client import QdrantClient
from transformers import AutoModel
from agent_search.core import AgentSearchResult
from agent_search.core.utils import (
cosine_similarity,
get_data_path,
load_config,
)
logger = logging.getLogger(__name__)
class WebSearchEngine:
"""A simple search client for the OpenSearch collection"""
def __init__(
self,
):
try:
    import psycopg2
except ImportError as e:
    raise ImportError(
        f"Error {e} while importing psycopg2. Please install it with `pip install psycopg2` to run a WebSearchEngine instance."
)
# Load config
| self.config = load_config()["agent_search"] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yohanshin/WHAM
# Path: configs/constants.py
IMG_FEAT_DIM = {
'resnet': 2048,
'vit': 1024
}
N_JOINTS = 17
PARSED_DATA = f'{root}/parsed_data'
THREEDPW_PTH = f'{root}/3DPW'
RICH_PTH = f'{root}/RICH'
EMDB_PTH = f'{root}/EMDB'
NUM_JOINTS = N_JOINTS
H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
H36M_TO_J14 = H36M_TO_J17[:14]
J17_TO_H36M = [14, 3, 4, 5, 2, 1, 0, 15, 12, 16, 13, 9, 10, 11, 8, 7, 6]
COCO_AUG_DICT = f'{root}/body_models/coco_aug_dict.pth'
TREE = [[5, 6], 0, 0, 1, 2, -1, -1, 5, 6, 7, 8, -1, -1, 11, 12, 13, 14, 15, 15, 15, 16, 16, 16]
S_BIAS = 1e-1
S_JITTERING = 5e-2
S_PEAK = 3e-1
S_PEAK_MASK = 5e-3
S_MASK = 0.03
MAIN_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] # reduced_joints
FLDR = f'{root}/body_models/smpl/'
SMPLX2SMPL = f'{root}/body_models/smplx2smpl.pkl'
FACES = f'{root}/body_models/smpl_faces.npy'
MEAN_PARAMS = f'{root}/body_models/smpl_mean_params.npz'
JOINTS_REGRESSOR_WHAM = f'{root}/body_models/J_regressor_wham.npy'
JOINTS_REGRESSOR_H36M = f'{root}/body_models/J_regressor_h36m.npy'
JOINTS_REGRESSOR_EXTRA = f'{root}/body_models/J_regressor_extra.npy'
JOINTS_REGRESSOR_FEET = f'{root}/body_models/J_regressor_feet.npy'
PARENTS = torch.tensor([
-1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21])
class PATHS:
class KEYPOINTS:
class BMODEL:
# Path: lib/data/normalizer.py
class Normalizer:
def __init__(self, cfg):
pass
def __call__(self, kp_2d, res, cam_intrinsics, patch_width=224, patch_height=224, bbox=None, mask=None):
if bbox is None:
bbox = compute_bbox_from_keypoints(kp_2d, do_augment=True, mask=mask)
out_kp_2d = self.bbox_normalization(kp_2d, bbox, res, patch_width, patch_height)
return out_kp_2d, bbox
def bbox_normalization(self, kp_2d, bbox, res, patch_width, patch_height):
to_torch = False
if isinstance(kp_2d, torch.Tensor):
to_torch = True
kp_2d = kp_2d.numpy()
bbox = bbox.numpy()
out_kp_2d = np.zeros_like(kp_2d)
for idx in range(len(out_kp_2d)):
out_kp_2d[idx] = transform_keypoints(kp_2d[idx], bbox[idx][:3], patch_width, patch_height)[0]
out_kp_2d[idx] = normalize_keypoints_to_patch(out_kp_2d[idx], patch_width)
if to_torch:
out_kp_2d = torch.from_numpy(out_kp_2d)
bbox = torch.from_numpy(bbox)
centers = normalize_keypoints_to_image(bbox[:, :2].unsqueeze(1), res).squeeze(1)
scale = bbox[:, 2:] * 200 / res.max()
location = torch.cat((centers, scale), dim=-1)
out_kp_2d = out_kp_2d.reshape(out_kp_2d.shape[0], -1)
out_kp_2d = torch.cat((out_kp_2d, location), dim=-1)
return out_kp_2d
# Path: lib/utils/imutils.py
def transform(pt, center, scale, res, invert=0, rot=0):
"""Transform pixel location to different reference."""
t = get_transform(center, scale, res, rot=rot)
if invert:
t = np.linalg.inv(t)
new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
new_pt = np.dot(t, new_pt)
return np.array([round(new_pt[0]), round(new_pt[1])], dtype=int) + 1
# Path: lib/data/_dataset.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import numpy as np
from skimage.util.shape import view_as_windows
from configs import constants as _C
from .normalizer import Normalizer
from lib.utils.imutils import transform
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, cfg, training=True):
super(BaseDataset, self).__init__()
self.n_joints = _C.KEYPOINTS.NUM_JOINTS
self.epoch = 0
self.n_frames = cfg.DATASET.SEQLEN + 1
self.training = training
| self.keypoints_normalizer = Normalizer(cfg) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: octo-models/octo
# Path: octo/data/utils/data_utils.py
def binarize_gripper_actions(actions: tf.Tensor) -> tf.Tensor:
"""Converts gripper actions from continous to binary values (0 and 1).
We exploit that fact that most of the time, the gripper is fully open (near 1.0) or fully closed (near
0.0). As it transitions between the two, it sometimes passes through a few intermediate values. We relabel
those intermediate values based on the state that is reached _after_ those intermediate values.
In the edge case that the trajectory ends with an intermediate value, we give up on binarizing and relabel
that chunk of intermediate values as the last action in the trajectory.
The scan implements the following code:
new_actions = np.empty_like(actions)
carry = actions[-1]
for i in reversed(range(actions.shape[0])):
if in_between_mask[i]:
carry = carry
else:
carry = float(open_mask[i])
new_actions[i] = carry
"""
open_mask = actions > 0.95
closed_mask = actions < 0.05
in_between_mask = tf.logical_not(tf.logical_or(open_mask, closed_mask))
is_open_float = tf.cast(open_mask, tf.float32)
def scan_fn(carry, i):
return tf.cond(
in_between_mask[i],
lambda: tf.cast(carry, tf.float32),
lambda: is_open_float[i],
)
new_actions = tf.scan(
scan_fn, tf.range(tf.shape(actions)[0]), actions[-1], reverse=True
)
return new_actions
# Path: octo/data/utils/data_utils.py
def invert_gripper_actions(actions: tf.Tensor):
return 1 - actions
# Path: octo/data/utils/data_utils.py
def rel2abs_gripper_actions(actions: tf.Tensor):
"""
Converts relative gripper actions (+1 for closing, -1 for opening) to absolute gripper actions
(0 for closed, 1 for open). Assumes that the first relative gripper action is not redundant
(i.e. close when already closed).
"""
opening_mask = actions < -0.1
closing_mask = actions > 0.1
# -1 for closing, 1 for opening, 0 for no change
thresholded_actions = tf.where(opening_mask, 1, tf.where(closing_mask, -1, 0))
def scan_fn(carry, i):
return tf.cond(
thresholded_actions[i] == 0,
lambda: carry,
lambda: thresholded_actions[i],
)
# if no relative grasp, assumes open for whole trajectory
start = -1 * thresholded_actions[tf.argmax(thresholded_actions != 0, axis=0)]
start = tf.cond(start == 0, lambda: 1, lambda: start)
# -1 for closed, 1 for open
new_actions = tf.scan(scan_fn, tf.range(tf.shape(actions)[0]), start)
new_actions = tf.cast(new_actions, tf.float32) / 2 + 0.5
return new_actions
# Path: octo/data/utils/data_utils.py
def relabel_actions(traj: Dict[str, Any]) -> Dict[str, Any]:
"""Relabels the actions to use the reached proprio instead. Discards the last timestep of the
trajectory (since we don't have a next state to compute the action.)
"""
# relabel the first 6 action dims (xyz position, xyz rotation) using the reached proprio
movement_actions = (
traj["observation"]["state"][1:, :6] - traj["observation"]["state"][:-1, :6]
)
# discard the last timestep of the trajectory
traj_truncated = tf.nest.map_structure(lambda x: x[:-1], traj)
# recombine to get full actions
traj_truncated["action"] = tf.concat(
[movement_actions, traj["action"][:-1, -1:]],
axis=1,
)
return traj_truncated
# Path: octo/data/oxe/oxe_standardization_transforms.py
from typing import Any, Dict
from octo.data.utils.data_utils import (
binarize_gripper_actions,
invert_gripper_actions,
rel2abs_gripper_actions,
relabel_actions,
)
import tensorflow as tf
import tensorflow_graphics.geometry.transformation as tft
"""Open X-Embodiment Dataset Transforms
input: dict of features, each is batched, i.e. has leading time dimension
expected output:
step = {
'observation': {
<image_keys, depth_image_keys>
state in chosen state representation
},
'action': action in chosen action representation,
'language_instruction': str,
}
"""
def bridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]:
# NOTE: this is not actually the official OXE copy of bridge, it is our own more up-to-date copy that you
# can find at https://rail.eecs.berkeley.edu/datasets/bridge_release/data/tfds/
trajectory["action"] = tf.concat(
[
trajectory["action"][:, :6],
| binarize_gripper_actions(trajectory["action"][:, -1])[:, None], |
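The relabeling that binarize_gripper_actions performs is easiest to see on a small example. Below is the plain NumPy loop from the docstring above applied to a toy trajectory; it illustrates the semantics only and is not the TensorFlow implementation used in the pipeline.
import numpy as np

def binarize_gripper_actions_np(actions: np.ndarray) -> np.ndarray:
    # Same loop as in the docstring above, written out in NumPy.
    open_mask = actions > 0.95
    closed_mask = actions < 0.05
    in_between_mask = ~(open_mask | closed_mask)
    new_actions = np.empty_like(actions)
    carry = actions[-1]
    for i in reversed(range(actions.shape[0])):
        if not in_between_mask[i]:
            carry = float(open_mask[i])
        new_actions[i] = carry
    return new_actions

# 0.9 and 0.5 are intermediate values; they inherit the closed state reached after them.
print(binarize_gripper_actions_np(np.array([1.0, 0.9, 0.5, 0.02, 0.0, 1.0])))   # [1. 0. 0. 0. 0. 1.]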
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mistralai/client-python
# Path: tests/utils.py
def mock_chat_response_payload():
return orjson.dumps(
{
"id": "chat-98c8c60e3fbf4fc49658eddaf447357c",
"object": "chat.completion",
"created": 1703165682,
"choices": [
{
"finish_reason": "stop",
"message": {
"role": "assistant",
"content": "What is the best French cheese?",
},
"index": 0,
}
],
"model": "mistral-small",
"usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0},
}
).decode()
# Path: tests/utils.py
def mock_chat_response_streaming_payload():
return [
"data: "
+ orjson.dumps(
{
"id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e",
"model": "mistral-small",
"choices": [
{
"index": 0,
"delta": {"role": "assistant"},
"finish_reason": None,
}
],
}
).decode()
+ "\n\n",
*[
"data: "
+ orjson.dumps(
{
"id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e",
"object": "chat.completion.chunk",
"created": 1703168544,
"model": "mistral-small",
"choices": [
{
"index": i,
"delta": {"content": f"stream response {i}"},
"finish_reason": None,
}
],
}
).decode()
+ "\n\n"
for i in range(10)
],
"data: [DONE]\n\n",
]
# Path: tests/utils.py
def mock_response(
status_code: int, content: str, is_json: bool = True
) -> mock.MagicMock:
response = mock.Mock(Response)
response.status_code = status_code
if is_json:
response.json = mock.MagicMock()
response.json.return_value = orjson.loads(content)
response.text = content
return response
# Path: tests/utils.py
@contextlib.contextmanager
def mock_stream_response(status_code: int, content: List[str]):
response = mock.Mock(Response)
response.status_code = status_code
response.iter_lines.return_value = iter(content)
yield response
# Path: tests/test_chat.py
import unittest.mock as mock
import pytest
from mistralai.client import MistralClient
from mistralai.models.chat_completion import (
ChatCompletionResponse,
ChatCompletionStreamResponse,
ChatMessage,
)
from .utils import (
mock_chat_response_payload,
mock_chat_response_streaming_payload,
mock_response,
mock_stream_response,
)
@pytest.fixture()
def client():
client = MistralClient()
client._client = mock.MagicMock()
return client
class TestChat:
def test_chat(self, client):
client._client.request.return_value = mock_response(
200,
mock_chat_response_payload(),
)
result = client.chat(
model="mistral-small",
messages=[
ChatMessage(role="user", content="What is the best French cheese?")
],
)
client._client.request.assert_called_once_with(
"post",
"https://api.mistral.ai/v1/chat/completions",
headers={
"User-Agent": f"mistral-client-python/{client._version}",
"Accept": "application/json",
"Authorization": "Bearer None",
"Content-Type": "application/json",
},
json={
"model": "mistral-small",
"messages": [
{"role": "user", "content": "What is the best French cheese?"}
],
"safe_prompt": False,
"stream": False,
},
)
assert isinstance(
result, ChatCompletionResponse
), "Should return an ChatCompletionResponse"
assert len(result.choices) == 1
assert result.choices[0].index == 0
assert result.object == "chat.completion"
def test_chat_streaming(self, client):
| client._client.stream.return_value = mock_stream_response( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kijai/ComfyUI-Marigold
# Path: marigold/model/rgb_encoder.py
class RGBEncoder(nn.Module):
"""
The encoder of a pretrained Stable Diffusion VAE
"""
def __init__(self, pretrained_path, subfolder=None) -> None:
super().__init__()
vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")
self.rgb_encoder = nn.Sequential(
vae.encoder,
vae.quant_conv,
)
def to(self, *args, **kwargs):
self.rgb_encoder.to(*args, **kwargs)
def forward(self, rgb_in):
return self.encode(rgb_in)
def encode(self, rgb_in):
moments = self.rgb_encoder(rgb_in) # [B, 8, H/8, W/8]
mean, logvar = torch.chunk(moments, 2, dim=1)
rgb_latent = mean
return rgb_latent
# Path: marigold/model/stacked_depth_AE.py
class StackedDepthAE(nn.Module):
"""
Tailored pretrained image VAE for depth map.
Encode: Depth images are repeated into 3 channels.
Decode: The average of the 3 channels is taken as output.
"""
def __init__(self, pretrained_path, subfolder=None) -> None:
super().__init__()
self.vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")
def forward(self, depth_in):
depth_latent = self.encode(depth_in)
depth_out = self.decode(depth_latent)
return depth_out
def to(self, *args, **kwargs):
self.vae.to(*args, **kwargs)
@staticmethod
def _stack_depth_images(depth_in):
if 4 == len(depth_in.shape):
stacked = depth_in.repeat(1, 3, 1, 1)
elif 3 == len(depth_in.shape):
stacked = depth_in.unsqueeze(1)
stacked = stacked.repeat(1, 3, 1, 1)
return stacked
def encode(self, depth_in):
stacked = self._stack_depth_images(depth_in)
h = self.vae.encoder(stacked)
moments = self.vae.quant_conv(h)
mean, logvar = torch.chunk(moments, 2, dim=1)
depth_latent = mean
return depth_latent
def decode(self, depth_latent):
z = self.vae.post_quant_conv(depth_latent)
stacked = self.vae.decoder(z)
depth_mean = stacked.mean(dim=1, keepdim=True)
return depth_mean
# Path: marigold/model/marigold_pipeline.py
import logging
import numpy as np
import torch
from typing import Dict
from diffusers import (
DDIMScheduler,
DDPMScheduler,
PNDMScheduler,
DEISMultistepScheduler,
SchedulerMixin,
UNet2DConditionModel,
)
from torch import nn
from torch.nn import Conv2d
from torch.nn.parameter import Parameter
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from .rgb_encoder import RGBEncoder
from .stacked_depth_AE import StackedDepthAE
# Author: Bingxin Ke
# Last modified: 2023-12-11
class MarigoldPipeline(nn.Module):
"""
Marigold monocular depth estimator.
"""
def __init__(
self,
unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx}
rgb_encoder_pretrained_path: Dict,
depht_ae_pretrained_path: Dict,
noise_scheduler_pretrained_path: Dict,
tokenizer_pretrained_path: Dict,
text_encoder_pretrained_path: Dict,
empty_text_embed=None,
trainable_unet=False,
rgb_latent_scale_factor=0.18215,
depth_latent_scale_factor=0.18215,
noise_scheduler_type=None,
enable_gradient_checkpointing=False,
enable_xformers=True,
) -> None:
super().__init__()
self.rgb_latent_scale_factor = rgb_latent_scale_factor
self.depth_latent_scale_factor = depth_latent_scale_factor
self.device = "cpu"
# ******* Initialize modules *******
# Trainable modules
self.trainable_module_dic: Dict[str, nn.Module] = {}
self.trainable_unet = trainable_unet
# Denoising UNet
self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
)
logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
if 8 != self.unet.config["in_channels"]:
self._replace_unet_conv_in()
logging.warning("Unet conv_in layer is replaced")
if enable_xformers:
self.unet.enable_xformers_memory_efficient_attention()
else:
self.unet.disable_xformers_memory_efficient_attention()
# Image encoder
| self.rgb_encoder = RGBEncoder( |
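StackedDepthAE above adapts an RGB VAE to depth maps by repeating the single depth channel three times before encoding and averaging the three decoded channels afterwards. The channel bookkeeping in isolation, illustrative only (the random tensor stands in for the VAE decoder output):
import torch

depth = torch.randn(4, 1, 64, 64)                    # (B, 1, H, W) depth maps
stacked = depth.repeat(1, 3, 1, 1)                   # (B, 3, H, W), as in encode()
decoded_rgb = torch.randn(4, 3, 64, 64)              # stand-in for the VAE decoder output
depth_out = decoded_rgb.mean(dim=1, keepdim=True)    # (B, 1, H, W), as in decode()
print(stacked.shape, depth_out.shape)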
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: modelscope/richdreamer
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
class bottleneck_IR(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth),
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
class bottleneck_IR_SE(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth),
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
SEModule(depth, 16),
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
def get_blocks(num_layers):
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3),
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3),
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3),
]
else:
raise ValueError(
"Invalid number of layers: {}. Must be one of [50, 100, 152]".format(
num_layers
)
)
return blocks
# Path: extern/ldm_zero123/thirdp/psp/helpers.py
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
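# A minimal usage sketch for l2_norm (hypothetical values), assuming torch is imported in this module:
# >>> import torch
# >>> l2_norm(torch.tensor([[3.0, 4.0]]))
# tensor([[0.6000, 0.8000]])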
# Path: extern/ldm_zero123/thirdp/psp/model_irse.py
from torch.nn import (BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear,
Module, PReLU, Sequential,)
from extern.ldm_zero123.thirdp.psp.helpers import (Flatten, bottleneck_IR,
bottleneck_IR_SE,
get_blocks, l2_norm,)
# https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir":
unit_module = bottleneck_IR
elif mode == "ir_se":
| unit_module = bottleneck_IR_SE |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rehg-lab/RAVE
# Path: annotator/mmpkg/mmcv/runner/dist_utils.py
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
# Path: annotator/mmpkg/mmcv/utils/logging.py
def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):
def print_log(msg, logger=None, level=logging.INFO):
# Path: annotator/mmpkg/mmcv/runner/base_module.py
import copy
import warnings
import torch.nn as nn
from abc import ABCMeta
from collections import defaultdict
from logging import FileHandler
from annotator.mmpkg.mmcv.runner.dist_utils import master_only
from annotator.mmpkg.mmcv.utils.logging import get_logger, logger_initialized, print_log
from ..cnn import initialize
from ..cnn.utils.weight_init import update_init_info
# Copyright (c) OpenMMLab. All rights reserved.
class BaseModule(nn.Module, metaclass=ABCMeta):
"""Base module for all modules in openmmlab.
``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
functionality of parameter initialization. Compared with
``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
- ``init_cfg``: the config to control the initialization.
- ``init_weights``: The function of parameter
initialization and recording initialization
information.
- ``_params_init_info``: Used to track the parameter
initialization information. This attribute only
exists during executing the ``init_weights``.
Args:
init_cfg (dict, optional): Initialization config dict.
"""
def __init__(self, init_cfg=None):
"""Initialize BaseModule, inherited from `torch.nn.Module`"""
# NOTE init_cfg can be defined in different levels, but init_cfg
# in low levels has a higher priority.
super(BaseModule, self).__init__()
# define default value of init_cfg instead of hard code
# in init_weights() function
self._is_init = False
self.init_cfg = copy.deepcopy(init_cfg)
# Backward compatibility in derived classes
# if pretrained is not None:
# warnings.warn('DeprecationWarning: pretrained is a deprecated \
# key, please consider using init_cfg')
# self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
@property
def is_init(self):
return self._is_init
def init_weights(self):
"""Initialize the weights."""
is_top_level_module = False
# check if it is top-level module
if not hasattr(self, '_params_init_info'):
# The `_params_init_info` is used to record the initialization
# information of the parameters
# the key should be the obj:`nn.Parameter` of model and the value
# should be a dict containing
# - init_info (str): The string that describes the initialization.
# - tmp_mean_value (FloatTensor): The mean of the parameter,
# which indicates whether the parameter has been modified.
# this attribute would be deleted after all parameters
# are initialized.
self._params_init_info = defaultdict(dict)
is_top_level_module = True
# Initialize the `_params_init_info`.
# When a change in the `tmp_mean_value` of
# the corresponding parameter is detected, update the related
# initialization information
for name, param in self.named_parameters():
self._params_init_info[param][
'init_info'] = f'The value is the same before and ' \
f'after calling `init_weights` ' \
f'of {self.__class__.__name__} '
self._params_init_info[param][
'tmp_mean_value'] = param.data.mean()
# pass `params_init_info` to all submodules
# All submodules share the same `params_init_info`,
# so it will be updated when parameters are
# modified at any level of the model.
for sub_module in self.modules():
sub_module._params_init_info = self._params_init_info
# Get the initialized logger, if not exist,
# create a logger named `mmcv`
logger_names = list(logger_initialized.keys())
logger_name = logger_names[0] if logger_names else 'mmcv'
module_name = self.__class__.__name__
if not self._is_init:
if self.init_cfg:
| print_log( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: worldcoin/open-iris
# Path: tests/e2e_tests/utils.py
def compare_debug_pipeline_outputs(pipeline_output_1: Dict[str, Any], pipeline_output_2: Dict[str, Any]):
"""Compare two IRISPipeline outputs for debugging.
Args:
pipeline_output_1 (Dict[str, Any]): pipeline output 1.
pipeline_output_2 (Dict[str, Any]): pipeline output 2.
"""
compare_iris_pipeline_template_output(pipeline_output_1["iris_template"], pipeline_output_2["iris_template"])
compare_iris_pipeline_metadata_output(pipeline_output_1["metadata"], pipeline_output_2["metadata"])
# Debug-specific intermediary outputs
to_test = {
"normalized_iris": ["normalized_image", "normalized_mask"],
"iris_response": ["iris_responses", "mask_responses"],
"extrapolated_polygons": ["pupil", "iris", "eyeball"],
}
for key, values in to_test.items():
for value in values:
np.testing.assert_almost_equal(
pipeline_output_1[key][value],
pipeline_output_2[key][value],
decimal=4,
)
np.testing.assert_almost_equal(
pipeline_output_1["segmentation_map"]["predictions"],
pipeline_output_2["segmentation_map"]["predictions"],
decimal=4,
)
# Path: tests/e2e_tests/utils.py
def compare_iris_pipeline_outputs(pipeline_output_1: Dict[str, Any], pipeline_output_2: Dict[str, Any]):
"""Compare two IRISPipeline outputs for the Orb.
Args:
pipeline_output_1 (Dict[str, Any]): pipeline output 1.
pipeline_output_2 (Dict[str, Any]): pipeline output 2.
"""
compare_iris_pipeline_template_output(pipeline_output_1["iris_template"], pipeline_output_2["iris_template"])
compare_iris_pipeline_metadata_output(pipeline_output_1["metadata"], pipeline_output_2["metadata"])
compare_iris_pipeline_error_output(pipeline_output_1["error"], pipeline_output_2["error"])
# Path: tests/e2e_tests/pipelines/test_e2e_iris_pipeline.py
import os
import pickle
import cv2
import numpy as np
import pytest
from typing import Any, Dict
from iris.pipelines.iris_pipeline import IRISPipeline
from tests.e2e_tests.utils import compare_debug_pipeline_outputs, compare_iris_pipeline_outputs
@pytest.fixture
def ir_image() -> np.ndarray:
ir_image_path = os.path.join(os.path.dirname(__file__), "mocks", "inputs", "anonymized.png")
img_data = cv2.imread(ir_image_path, cv2.IMREAD_GRAYSCALE)
return img_data
@pytest.fixture
def expected_iris_pipeline_output() -> Dict[str, Any]:
expected_iris_code_path = os.path.join(
os.path.dirname(__file__), "mocks", "outputs", "expected_iris_orb_pipeline_output.pickle"
)
return pickle.load(open(expected_iris_code_path, "rb"))
@pytest.fixture
def expected_debug_pipeline_output() -> Dict[str, Any]:
expected_iris_code_path = os.path.join(
os.path.dirname(__file__), "mocks", "outputs", "expected_iris_debug_pipeline_output.pickle"
)
return pickle.load(open(expected_iris_code_path, "rb"))
def test_e2e_iris_pipeline(ir_image: np.ndarray, expected_iris_pipeline_output: Dict[str, Any]) -> None:
"""End-to-end test of the IRISPipeline in the Orb setup"""
iris_pipeline = IRISPipeline()
computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
compare_iris_pipeline_outputs(computed_pipeline_output, expected_iris_pipeline_output)
def test_e2e_debug_pipeline(ir_image: np.ndarray, expected_debug_pipeline_output: Dict[str, Any]) -> None:
"""End-to-end test of the IRISPipeline in the debug setup"""
iris_pipeline = IRISPipeline(env=IRISPipeline.DEBUGGING_ENVIRONMENT)
computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
| compare_debug_pipeline_outputs(computed_pipeline_output, expected_debug_pipeline_output) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: laixintao/mactop
# Path: mactop/widgets/labeled_colorbar.py
class LabeledColorBar(Static):
percentages = reactive(None)
DEFAULT_CSS = """
LabeledColorBar {
layout: horizontal;
}
LabeledColorBar > ColorBar {
width: 1fr;
}
"""
def __init__(
self,
prefix_label,
color_choices,
update_interval,
percentages_update_fn: Callable[[], List[float]],
value_render_fn: Callable[[List[float]], str],
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.percentages_update_fn = percentages_update_fn
self.color_choices = color_choices
self.update_interval = update_interval
self.prefix_label = prefix_label
self.value_render_fn = value_render_fn
def on_mount(self) -> None:
self.set_interval(self.update_interval, self.update_percentages)
def update_percentages(self) -> None:
result = self.percentages_update_fn()
if result is not None:
self.percentages = copy.copy(result)
def watch_percentages(self, percentages) -> None:
if not percentages:
return
try:
number_widget = self.query_one(".colorbar-value")
except textual.css.query.NoMatches:
logger.warning(
"Can not found DOM element in .colorbar-value in LabeledColorBar"
)
return
number_str = self.value_render_fn(percentages)
number_widget.styles.width = len(number_str)
number_widget.update(number_str)
colorbar = self.query_one("ColorBar")
colorbar.percentages = percentages
def compose(self) -> ComposeResult:
yield Label(f"{self.prefix_label}", classes="colorbar-label")
yield ColorBar(self.color_choices)
yield Static(" ", classes="colorbar-value")
# Path: mactop/metrics_store.py
class ProcessorType(enum.Enum):
class Smc:
class PowerMetricsBattery:
class Netowrk:
class CPU:
class M1GPU:
class CPUCore:
class ProcessorPackage:
class M1CPUCluster:
class M1ProcessorPackage:
class ProcessorIntel:
class Disk:
class PowerMetrics:
class AdapterDetails:
class AppleSmartBattery:
class IORegMetrics:
class CPUTimesPercent:
class SwapMemory:
class VirtualMemory:
class LoadAvg:
class PsutilMetrics:
class Metrics:
INTEL = "intel"
M1 = "M1"
def get_core(self, core_index):
def get_psutilmetrics(self):
def set_psutilmetrics(self, p: PsutilMetrics):
def get_powermetrics(self):
def set_powermetrics(self, metrics):
def get_ioregmetrics(self):
def set_ioregmetrics(self, metrics):
# Path: mactop/utils/formatting.py
def render_cpu_percentage_100(percentages):
busy = 100 - percentages[-1]
return f"{busy:2.0f}%"
# Path: mactop/panels/_base.py
class BaseStatic(Static):
def __init__(self, refresh_interval, *args, **kwargs):
super().__init__(*args, **kwargs)
self.refresh_interval = float(refresh_interval)
# Path: mactop/const.py
COLOR_USER="green"
COLOR_NICE="blue"
COLOR_SYSTEM="#006400"
COLOR_IDLE="#2F4F4F"
COLOR_C_STATE="#008000"
COLOR_P_STATE="#FF8C00"
# Path: mactop/panels/cpu_percpu_usage.py
import logging
from functools import partial
from textual.app import ComposeResult
from mactop.widgets import LabeledColorBar
from mactop.metrics_store import metrics
from mactop.utils.formatting import render_cpu_percentage_100
from ._base import BaseStatic
from mactop import const
logger = logging.getLogger(__name__)
def get_percpu_percent(index):
cpus = metrics.psutilmetrics.cpu_percent_percpu
if not cpus:
return [0, 0, 0, 0]
cpu_percent = cpus[index]
return [
cpu_percent.user,
cpu_percent.nice,
cpu_percent.system,
cpu_percent.idle,
]
class CPUUsageBarPanel(BaseStatic):
BORDER_TITLE = "CPU"
DEFAULT_CSS = """
CPUUsageBarPanel {
layout: grid;
grid-gutter: 0 1;
}
"""
def __init__(
self,
color_user=const.COLOR_USER,
color_nice=const.COLOR_NICE,
color_system=const.COLOR_SYSTEM,
color_idle=const.COLOR_IDLE,
columns=4,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.color_user = color_user
self.color_nice = color_nice
self.color_system = color_system
self.color_idle = color_idle
self.columns = int(columns)
def compose(self) -> ComposeResult:
self.styles.grid_size_columns = self.columns
cpu_count = metrics.psutilmetrics.cpu_count
for index in range(cpu_count):
yield LabeledColorBar(
prefix_label=f"[#FFFFE0]{index:>2}[/#FFFFE0]",
color_choices=[
self.color_user,
self.color_nice,
self.color_system,
self.color_idle,
],
percentages_update_fn=partial(get_percpu_percent, index=index),
| value_render_fn=render_cpu_percentage_100, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: geopavlakos/hamer
# Path: hamer/datasets/utils.py
def convert_cvimg_to_tensor(cvimg: np.array):
"""
Convert image from HWC to CHW format.
Args:
cvimg (np.array): Image of shape (H, W, 3) as loaded by OpenCV.
Returns:
np.array: Output image of shape (3, H, W).
"""
# from h,w,c(OpenCV) to c,h,w
img = cvimg.copy()
img = np.transpose(img, (2, 0, 1))
# from int to float
img = img.astype(np.float32)
return img
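# A minimal usage sketch (hypothetical shapes), assuming numpy is imported as np in this module:
# >>> dummy = np.zeros((480, 640, 3), dtype=np.uint8)
# >>> convert_cvimg_to_tensor(dummy).shape
# (3, 480, 640)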
# Path: hamer/datasets/utils.py
def expand_to_aspect_ratio(input_shape, target_aspect_ratio=None):
"""Increase the size of the bounding box to match the target shape."""
if target_aspect_ratio is None:
return input_shape
try:
w , h = input_shape
except (ValueError, TypeError):
return input_shape
w_t, h_t = target_aspect_ratio
if h / w < h_t / w_t:
h_new = max(w * h_t / w_t, h)
w_new = w
else:
h_new = h
w_new = max(h * w_t / h_t, w)
if h_new < h or w_new < w:
breakpoint()
return np.array([w_new, h_new])
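# A minimal usage sketch (hypothetical numbers): a 200x100 (w, h) box expanded to a square target ratio.
# >>> expand_to_aspect_ratio((200, 100), target_aspect_ratio=(1, 1))
# array([200., 200.])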
# Path: hamer/datasets/utils.py
def generate_image_patch_cv2(img: np.array, c_x: float, c_y: float,
bb_width: float, bb_height: float,
patch_width: float, patch_height: float,
do_flip: bool, scale: float, rot: float,
border_mode=cv2.BORDER_CONSTANT, border_value=0) -> Tuple[np.array, np.array]:
"""
Crop the input image and return the crop and the corresponding transformation matrix.
Args:
img (np.array): Input image of shape (H, W, 3)
c_x (float): Bounding box center x coordinate in the original image.
c_y (float): Bounding box center y coordinate in the original image.
bb_width (float): Bounding box width.
bb_height (float): Bounding box height.
patch_width (float): Output box width.
patch_height (float): Output box height.
do_flip (bool): Whether to flip image or not.
scale (float): Rescaling factor for the bounding box (augmentation).
rot (float): Random rotation applied to the box.
Returns:
img_patch (np.array): Cropped image patch of shape (patch_height, patch_height, 3)
trans (np.array): Transformation matrix.
"""
img_height, img_width, img_channels = img.shape
if do_flip:
img = img[:, ::-1, :]
c_x = img_width - c_x - 1
trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot)
img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)),
flags=cv2.INTER_LINEAR,
borderMode=border_mode,
borderValue=border_value,
)
# Force borderValue=cv2.BORDER_CONSTANT for alpha channel
if (img.shape[2] == 4) and (border_mode != cv2.BORDER_CONSTANT):
img_patch[:,:,3] = cv2.warpAffine(img[:,:,3], trans, (int(patch_width), int(patch_height)),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
)
return img_patch, trans
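# A hypothetical call sketch, assuming the helper gen_trans_from_patch_cv referenced above is
# available in this module; crops a 256x256 patch around a box centered at (320, 240):
# >>> img = np.zeros((480, 640, 3), dtype=np.uint8)
# >>> patch, trans = generate_image_patch_cv2(img, 320, 240, 200, 200, 256, 256,
# ...                                         do_flip=False, scale=1.0, rot=0.0)
# >>> patch.shape, trans.shape
# ((256, 256, 3), (2, 3))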
# Path: hamer/datasets/vitdet_dataset.py
from typing import Dict
from skimage.filters import gaussian
from yacs.config import CfgNode
from .utils import (convert_cvimg_to_tensor,
expand_to_aspect_ratio,
generate_image_patch_cv2)
import cv2
import numpy as np
import torch
DEFAULT_MEAN = 255. * np.array([0.485, 0.456, 0.406])
DEFAULT_STD = 255. * np.array([0.229, 0.224, 0.225])
class ViTDetDataset(torch.utils.data.Dataset):
def __init__(self,
cfg: CfgNode,
img_cv2: np.array,
boxes: np.array,
right: np.array,
rescale_factor=2.5,
train: bool = False,
**kwargs):
super().__init__()
self.cfg = cfg
self.img_cv2 = img_cv2
# self.boxes = boxes
assert train == False, "ViTDetDataset is only for inference"
self.train = train
self.img_size = cfg.MODEL.IMAGE_SIZE
self.mean = 255. * np.array(self.cfg.MODEL.IMAGE_MEAN)
self.std = 255. * np.array(self.cfg.MODEL.IMAGE_STD)
# Preprocess annotations
boxes = boxes.astype(np.float32)
self.center = (boxes[:, 2:4] + boxes[:, 0:2]) / 2.0
self.scale = rescale_factor * (boxes[:, 2:4] - boxes[:, 0:2]) / 200.0
self.personid = np.arange(len(boxes), dtype=np.int32)
self.right = right.astype(np.float32)
def __len__(self) -> int:
return len(self.personid)
def __getitem__(self, idx: int) -> Dict[str, np.array]:
center = self.center[idx].copy()
center_x = center[0]
center_y = center[1]
scale = self.scale[idx]
BBOX_SHAPE = self.cfg.MODEL.get('BBOX_SHAPE', None)
| bbox_size = expand_to_aspect_ratio(scale*200, target_aspect_ratio=BBOX_SHAPE).max() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rogeriochaves/driver
# Path: driver/UIED/run_single.py
def detect_components(
input_path_img, ocr_result: AnnotatedImage, showOCR=False, showUIED=False
) -> DetectElementsResponse:
output_root = "output"
# Resizes the image to be smaller because this process is heavy, and lower resolution
# does not lose much quality when detecting components
max_width_or_height = 982
resized_height = resize_height_by_longest_edge(
input_path_img, resize_length=max_width_or_height
)
# color_tips()
is_clf = False
import detect_text.text_detection as text
os.makedirs(pjoin(output_root, "ocr"), exist_ok=True)
text_json = text.text_detection(
ocr_result, input_path_img, output_root, show=showOCR
)
import detect_compo.ip_region_proposal as ip
os.makedirs(pjoin(output_root, "ip"), exist_ok=True)
# switch of the classification func
classifier = None
if is_clf:
classifier = {}
from cnn.CNN import CNN
# classifier['Image'] = CNN('Image')
classifier["Elements"] = CNN("Elements")
# classifier['Noise'] = CNN('Noise')
compo_json = ip.compo_detection(
input_path_img,
output_root,
key_params,
classifier=classifier,
resize_by_height=resized_height,
show=False,
)
import detect_merge.merge as merge
os.makedirs(pjoin(output_root, "merge"), exist_ok=True)
name = input_path_img.split("/")[-1][:-4]
compo_path = pjoin(output_root, "ip", str(name) + ".json")
ocr_path = pjoin(output_root, "ocr", str(name) + ".json")
board, components = merge.merge(
input_path_img,
compo_json,
text_json,
pjoin(output_root, "merge"),
is_remove_bar=key_params["remove-bar"],
is_paragraph=key_params["merge-line-to-paragraph"],
show=showUIED,
)
return components
# Path: driver/UIED/utils.py
def show_image(window_name: str, image: cv2.typing.MatLike):
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
if sys.platform == "darwin":
os.system(
"""/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "python" to true' """
)
cv2.imshow(window_name, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Hack to fix closing bug on macos: https://stackoverflow.com/questions/6116564/destroywindow-does-not-close-window-on-mac-using-python-and-opencv
for _ in range(1, 5):
cv2.waitKey(1)
# Path: driver/ocr_call.py
def ocr_text_detection(input_image_path, config: DebugConfig) -> AnnotatedImage:
ocr_provider = config["ocr_provider"]
if not ocr_provider:
if os.environ.get("AZURE_VISION_API_KEY"):
ocr_provider = "azure"
elif os.environ.get("GCLOUD_VISION_API_KEY"):
ocr_provider = "google"
elif os.environ.get("BAIDU_OCR_API_KEY"):
ocr_provider = "baidu"
if ocr_provider == "azure":
print_action("Annotating screenshot with Azure Vision")
return azure_ocr_text_detect(input_image_path)
elif ocr_provider == "google":
print_action("Annotating screenshot with Google Cloud Vision")
return google_ocr_text_detect(input_image_path)
elif ocr_provider == "baidu":
print_action("Annotating screenshot with Baidu Vision")
return baidu_ocr_text_detect(input_image_path)
else:
raise Exception(
"No OCR API env variable set, please set either AZURE_VISION_API_KEY or GCLOUD_VISION_API_KEY"
)
# Path: driver/types.py
class Click(TypedDict):
class Type(TypedDict):
class Press(TypedDict):
class Refresh(TypedDict):
class LabelMapItem(TypedDict):
class ImgMultiplierFactor(TypedDict):
class DebugConfig(TypedDict):
class Context(TypedDict):
class Vertex:
class BoundingPoly:
class TextAnnotation:
class AnnotatedImage:
# Path: driver/utils.py
def is_retina_display():
return is_retina
# Path: driver/annotator.py
import math
import os
import cv2
from PIL import Image, ImageDraw, ImageFont
from driver.UIED.run_single import detect_components
from driver.UIED.utils import show_image
from driver.ocr_call import ocr_text_detection
from driver.types import DebugConfig, ImgMultiplierFactor, LabelMap
from driver.utils import is_retina_display
def annotate_image(input_image_path, debug: DebugConfig):
ocr_result = ocr_text_detection(input_image_path, debug)
components = detect_components(
input_image_path,
ocr_result,
showOCR=debug["ocr"],
showUIED=debug["uied"],
)
original_image = Image.open(input_image_path)
size = {"width": original_image.width, "height": original_image.height}
img_multiplier_factor: ImgMultiplierFactor = {
"height": components["img_shape"][0] / size["height"],
"width": components["img_shape"][1] / size["width"],
}
label_counter = 1
label_prefix = "A"
drawn_positions = []
| label_map: LabelMap = {} |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: baidubce/app-builder
# Path: appbuilder/core/component.py
class Component:
r"""Component基类, 其它实现的Component子类需要继承该基类,并至少实现run方法."""
def __init__(self,
meta: Optional[ComponentArguments] = ComponentArguments(),
secret_key: Optional[str] = None,
gateway: str = ""
):
r"""Component初始化方法.
参数:
meta (obj: `ComponentArguments`, 可选) : component元信息.
secret_key(str,可选): 用户鉴权token, 默认从环境变量中获取: os.getenv("APPBUILDER_TOKEN", "").
gateway(str, 可选): 后端网关服务地址,默认从环境变量中获取: os.getenv("GATEWAY_URL", "")
返回:
无
"""
self.meta = meta
self.http_client = HTTPClient(secret_key, gateway)
def __call__(self, *inputs, **kwargs):
r"""implement __call__ method"""
return self.run(*inputs, **kwargs)
def run(self, *inputs, **kwargs):
r"""
Defines the computation performed at every call.
Should be overridden by all subclasses.
Parameters:
*inputs(tuple): unpacked tuple arguments
**kwargs(dict): unpacked dict arguments
"""
raise NotImplementedError
def batch(self, *args, **kwargs) -> List[Message]:
r"""pass"""
return None
async def arun(self, *args, **kwargs) -> Optional[Message]:
r"""pass"""
return None
async def abatch(self, *args, **kwargs) -> List[Message]:
r"""pass"""
return None
def _trace(self, **data) -> None:
r"""pass"""
pass
def _debug(self, **data) -> None:
r"""pass"""
pass
# Path: appbuilder/core/message.py
class Message(BaseModel, Generic[_T]):
content: Optional[_T] = {}
name: Optional[str] = "msg"
mtype: Optional[str] = "dict"
id: Optional[str] = str(uuid.uuid4())
def __init__(self, content: Optional[_T] = None, **data):
if content is not None:
data['content'] = content
super().__init__(**data)
self.mtype = type(self.content).__name__
def __str__(self):
return f"Message(name={self.name}, content={self.content}, mtype={self.mtype})"
def __repr__(self):
return f"{self.__class__.__name__}(name={self.name!r}, content={self.content!r}, mtype={self.mtype!r})"
# Path: appbuilder/core/component.py
class ComponentArguments(BaseModel):
r""""ComponentArguments define Component meta fields"""
name: str = ""
tool_desc: Dict[str, Any] = {}
def extract_values_to_dict(self):
r"""extract ComponentArguments fields to dict"""
inputs = {}
for field_name, field in self.__fields__.items():
value = getattr(self, field_name)
# Get the display_name metadata
variable_name = field.field_info.extra.get('variable_name')
if variable_name:
# Use the actual value of the Enum member
if isinstance(value, Message):
inputs[variable_name] = str(value.content)
elif isinstance(value, Enum):
inputs[variable_name] = str(value.value)
else:
inputs[variable_name] = str(value)
else:
inputs[field_name] = value
return inputs
# Path: appbuilder/core/components/embeddings/base.py
from abc import abstractmethod
from typing import List, Union
from appbuilder.core.component import Component
from appbuilder.core.message import Message
from appbuilder.core.component import ComponentArguments
"""
base
"""
# Copyright (c) 2023 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EmbeddingBaseComponent(Component):
"""
EmbeddingBaseComponent
"""
name: str
version: str
| meta: ComponentArguments |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: corfyi/UCMCTrack
# Path: util/run_ucmc.py
def run_ucmc(args, det_path = "det_results/mot17/yolox_x_ablation",
cam_path = "cam_para/mot17",
gmc_path = "gmc/mot17",
out_path = "output/mot17",
exp_name = "val",
dataset = "MOT17"):
seq_name = args.seq
eval_path = os.path.join(out_path,exp_name)
orig_save_path = os.path.join(eval_path,seq_name)
if not os.path.exists(orig_save_path):
os.makedirs(orig_save_path)
if dataset == "MOT17":
det_file = os.path.join(det_path, f"{seq_name}-SDP.txt")
cam_para = os.path.join(cam_path, f"{seq_name}-SDP.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}-SDP.txt")
elif dataset == "MOT20":
det_file = os.path.join(det_path, f"{seq_name}.txt")
cam_para = os.path.join(cam_path, f"{seq_name}.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}.txt")
gmc_file = os.path.join(gmc_path, f"GMC-{seq_name}.txt")
print(det_file)
print(cam_para)
detector = Detector()
detector.load(cam_para, det_file,gmc_file)
print(f"seq_length = {detector.seq_length}")
a1 = args.a
a2 = args.a
high_score = args.high_score
conf_thresh = args.conf_thresh
fps = args.fps
cdt = args.cdt
wx = args.wx
wy = args.wy
vmax = args.vmax
tracker = UCMCTrack(a1, a2, wx,wy,vmax, cdt, fps, dataset, high_score,args.cmc,detector)
t1 = time.time()
tracklets = dict()
with open(result_file,"w") as f:
for frame_id in range(1, detector.seq_length + 1):
dets = detector.get_dets(frame_id, conf_thresh)
tracker.update(dets,frame_id)
if args.hp:
for i in tracker.tentative_idx:
t = tracker.trackers[i]
if(t.detidx < 0 or t.detidx >= len(dets)):
continue
if t.id not in tracklets:
tracklets[t.id] = Tracklet(frame_id, dets[t.detidx].get_box())
else:
tracklets[t.id].add_box(frame_id, dets[t.detidx].get_box())
for i in tracker.confirmed_idx:
t = tracker.trackers[i]
if(t.detidx < 0 or t.detidx >= len(dets)):
continue
if t.id not in tracklets:
tracklets[t.id] = Tracklet(frame_id, dets[t.detidx].get_box())
else:
tracklets[t.id].add_box(frame_id, dets[t.detidx].get_box())
tracklets[t.id].activate()
else:
for i in tracker.confirmed_idx:
t = tracker.trackers[i]
if(t.detidx < 0 or t.detidx >= len(dets)):
continue
d = dets[t.detidx]
f.write(f"{frame_id},{t.id},{d.bb_left:.1f},{d.bb_top:.1f},{d.bb_width:.1f},{d.bb_height:.1f},{d.conf:.2f},-1,-1,-1\n")
if args.hp:
for frame_id in range(1, detector.seq_length + 1):
for id in tracklets:
if tracklets[id].is_active:
if frame_id in tracklets[id].boxes:
box = tracklets[id].boxes[frame_id]
f.write(f"{frame_id},{id},{box[0]:.1f},{box[1]:.1f},{box[2]:.1f},{box[3]:.1f},-1,-1,-1,-1\n")
interpolate(orig_save_path, eval_path, n_min=3, n_dti=cdt, is_enable = True)
print(f"Time cost: {time.time() - t1:.2f}s")
# Path: util/run_ucmc.py
def make_args():
parser = argparse.ArgumentParser(description='Process some arguments.')
parser.add_argument('--seq', type=str, default = "MOT17-02", help='seq name')
parser.add_argument('--fps', type=float, default=30.0, help='fps')
parser.add_argument('--wx', type=float, default=0.1, help='wx')
parser.add_argument('--wy', type=float, default=0.1, help='wy')
parser.add_argument('--vmax', type=float, default=0.5, help='vmax')
parser.add_argument('--a', type=float, default=10.0, help='assignment threshold')
parser.add_argument('--cdt', type=float, default=30.0, help='coasted deletion time')
parser.add_argument('--high_score', type=float, default=0.6, help='high score threshold')
parser.add_argument('--conf_thresh', type=float, default=0.5, help='detection confidence threshold')
parser.add_argument("--cmc", action="store_true", help="use cmc or not.")
parser.add_argument("--hp", action="store_true", help="use head padding or not.")
args = parser.parse_args()
return args
# Path: run_mot20_test.py
from util.run_ucmc import run_ucmc, make_args
if __name__ == '__main__':
det_path = "det_results/mot20"
cam_path = "cam_para/mot20"
gmc_path = "gmc/mot20"
out_path = "output/mot20"
exp_name = "test"
dataset = "MOT20"
| args = make_args() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ingra14m/Specular-Gaussians
# Path: utils/loss_utils.py
def ssim(img1, img2, window_size=11, size_average=True):
channel = img1.size(-3)
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
# Path: utils/image_utils.py
def psnr(img1, img2):
mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
return 20 * torch.log10(1.0 / torch.sqrt(mse))
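# A small worked example (hypothetical tensors): a constant error of 0.5 gives
# 20 * log10(1 / 0.5), about 6.02 dB, for each image in the batch.
# >>> import torch
# >>> psnr(torch.full((2, 3, 4, 4), 0.5), torch.zeros(2, 3, 4, 4))
# tensor([[6.0206],
#         [6.0206]])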
# Path: metrics.py
from pathlib import Path
from PIL import Image
from utils.loss_utils import ssim
from tqdm import tqdm
from utils.image_utils import psnr
from argparse import ArgumentParser
import os
import torch
import torchvision.transforms.functional as tf
import lpips
import json
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
# from lpipsPyTorch import lpips
def readImages(renders_dir, gt_dir):
renders = []
gts = []
image_names = []
for fname in os.listdir(renders_dir):
render = Image.open(renders_dir / fname)
gt = Image.open(gt_dir / fname)
renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda())
gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda())
image_names.append(fname)
return renders, gts, image_names
def evaluate(model_paths):
full_dict = {}
per_view_dict = {}
full_dict_polytopeonly = {}
per_view_dict_polytopeonly = {}
print("")
for scene_dir in model_paths:
try:
print("Scene:", scene_dir)
full_dict[scene_dir] = {}
per_view_dict[scene_dir] = {}
full_dict_polytopeonly[scene_dir] = {}
per_view_dict_polytopeonly[scene_dir] = {}
test_dir = Path(scene_dir) / "test"
for method in os.listdir(test_dir):
if not method.startswith("ours"):
continue
print("Method:", method)
full_dict[scene_dir][method] = {}
per_view_dict[scene_dir][method] = {}
full_dict_polytopeonly[scene_dir][method] = {}
per_view_dict_polytopeonly[scene_dir][method] = {}
method_dir = test_dir / method
gt_dir = method_dir / "gt"
renders_dir = method_dir / "renders"
renders, gts, image_names = readImages(renders_dir, gt_dir)
ssims = []
psnrs = []
lpipss = []
for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"):
| ssims.append(ssim(renders[idx], gts[idx])) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: u2seg/U2Seg
# Path: detectron2/utils/comm.py
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
# Path: detectron2/utils/comm.py
def is_main_process() -> bool:
return get_rank() == 0
# Path: detectron2/utils/logger.py
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
"""
Log no more than once per n seconds.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by default.
"""
caller_module, key = _find_caller()
last_logged = _LOG_TIMER.get(key, None)
current_time = time.time()
if last_logged is None or current_time - last_logged >= n:
logging.getLogger(name or caller_module).log(lvl, msg)
_LOG_TIMER[key] = current_time
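# A minimal usage sketch (hypothetical message): emit a progress line at most once every 5 seconds,
# no matter how often the call site is hit.
# >>> log_every_n_seconds(logging.INFO, "still processing...", n=5)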
# Path: detectron2/evaluation/evaluator.py
import datetime
import logging
import time
import torch
from collections import OrderedDict, abc
from contextlib import ExitStack, contextmanager
from typing import List, Union
from torch import nn
from detectron2.utils.comm import get_world_size, is_main_process
from detectron2.utils.logger import log_every_n_seconds
# Copyright (c) Facebook, Inc. and its affiliates.
class DatasetEvaluator:
"""
Base class for a dataset evaluator.
The function :func:`inference_on_dataset` runs the model over
all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.
This class will accumulate information of the inputs/outputs (by :meth:`process`),
and produce evaluation results in the end (by :meth:`evaluate`).
"""
def reset(self):
"""
Preparation for a new round of evaluation.
Should be called before starting a round of evaluation.
"""
pass
def process(self, inputs, outputs):
"""
Process the pair of inputs and outputs.
If they contain batches, the pairs can be consumed one-by-one using `zip`:
.. code-block:: python
for input_, output in zip(inputs, outputs):
# do evaluation on single input/output pair
...
Args:
inputs (list): the inputs that's used to call the model.
outputs (list): the return value of `model(inputs)`
"""
pass
def evaluate(self):
"""
Evaluate/summarize the performance, after processing all input/output pairs.
Returns:
dict:
A new evaluator class can return a dict of arbitrary format
as long as the user can process the results.
In our train_net.py, we expect the following format:
* key: the name of the task (e.g., bbox)
* value: a dict of {metric name: score}, e.g.: {"AP50": 80}
"""
pass
class DatasetEvaluators(DatasetEvaluator):
"""
Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
This class dispatches every evaluation call to
all of its :class:`DatasetEvaluator`.
"""
def __init__(self, evaluators):
"""
Args:
evaluators (list): the evaluators to combine.
"""
super().__init__()
self._evaluators = evaluators
def reset(self):
for evaluator in self._evaluators:
evaluator.reset()
def process(self, inputs, outputs):
for evaluator in self._evaluators:
evaluator.process(inputs, outputs)
def evaluate(self):
results = OrderedDict()
for evaluator in self._evaluators:
result = evaluator.evaluate()
if is_main_process() and result is not None:
for k, v in result.items():
assert (
k not in results
), "Different evaluators produce results with the same key {}".format(k)
results[k] = v
return results
def inference_on_dataset(
model,
data_loader,
evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
callbacks=None,
):
"""
Run model on the data_loader and evaluate the metrics with evaluator.
Also benchmark the inference speed of `model.__call__` accurately.
The model will be used in eval mode.
Args:
model (callable): a callable which takes an object from
`data_loader` and returns some outputs.
If it's an nn.Module, it will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
but don't want to do any evaluation.
callbacks (dict of callables): a dictionary of callback functions which can be
called at each stage of inference.
Returns:
The return value of `evaluator.evaluate()`
"""
| num_devices = get_world_size() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: upfusion3d/upfusion
# Path: control_net/ldm/modules/diffusionmodules/util.py
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
# select alphas for computing the variance schedule
alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
# according to the formula provided in https://arxiv.org/abs/2010.02502
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
if verbose:
print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
print(f'For the chosen value of eta, which is {eta}, '
f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
return sigmas, alphas, alphas_prev
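# A minimal usage sketch (hypothetical schedule): with eta = 0 the DDIM sigmas are all zero,
# i.e. fully deterministic sampling.
# >>> alphacums = np.linspace(0.9999, 0.01, 1000)
# >>> ddim_ts = make_ddim_timesteps('uniform', num_ddim_timesteps=10, num_ddpm_timesteps=1000, verbose=False)
# >>> sigmas, alphas, alphas_prev = make_ddim_sampling_parameters(alphacums, ddim_ts, eta=0.0, verbose=False)
# >>> bool(np.all(sigmas == 0))
# True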
# Path: control_net/ldm/modules/diffusionmodules/util.py
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
if ddim_discr_method == 'uniform':
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
elif ddim_discr_method == 'quad':
ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
else:
raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
# add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1
if verbose:
print(f'Selected timesteps for ddim sampler: {steps_out}')
return steps_out
# Path: control_net/ldm/modules/diffusionmodules/util.py
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise()
# Path: control_net/ldm/modules/diffusionmodules/util.py
def extract_into_tensor(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
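# A minimal usage sketch (hypothetical schedule): gather per-sample coefficients for timesteps t
# and reshape them so they broadcast over an image-shaped batch.
# >>> import torch
# >>> a = torch.linspace(0.9999, 0.0001, steps=1000)   # e.g. a cumulative-alpha schedule
# >>> t = torch.tensor([0, 999])
# >>> extract_into_tensor(a, t, x_shape=(2, 3, 64, 64)).shape
# torch.Size([2, 1, 1, 1])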
# Path: control_net/cldm/ddim_hacked.py
import torch
import numpy as np
from tqdm import tqdm
from control_net.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
"""SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
| self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: modelscope/normal-depth-diffusion
# Path: libs/ControlNet-v1-1-nightly/annotator/normalbae/models/submodules/submodules.py
class UpSampleBN(nn.Module):
def __init__(self, skip_input, output_features):
super(UpSampleBN, self).__init__()
self._net = nn.Sequential(
nn.Conv2d(
skip_input,
output_features,
kernel_size=3,
stride=1,
padding=1), nn.BatchNorm2d(output_features), nn.LeakyReLU(),
nn.Conv2d(
output_features,
output_features,
kernel_size=3,
stride=1,
padding=1), nn.BatchNorm2d(output_features), nn.LeakyReLU())
def forward(self, x, concat_with):
up_x = F.interpolate(
x,
size=[concat_with.size(2),
concat_with.size(3)],
mode='bilinear',
align_corners=True)
f = torch.cat([up_x, concat_with], dim=1)
return self._net(f)
# Path: libs/ControlNet-v1-1-nightly/annotator/normalbae/models/submodules/submodules.py
def norm_normalize(norm_out):
min_kappa = 0.01
norm_x, norm_y, norm_z, kappa = torch.split(norm_out, 1, dim=1)
norm = torch.sqrt(norm_x**2.0 + norm_y**2.0 + norm_z**2.0) + 1e-10
kappa = F.elu(kappa) + 1.0 + min_kappa
final_out = torch.cat([norm_x / norm, norm_y / norm, norm_z / norm, kappa],
dim=1)
return final_out
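# A minimal usage sketch (hypothetical input): for a (B, 4, H, W) prediction, the first three
# channels come out with unit L2 norm per pixel and the fourth (kappa) is at least min_kappa.
# >>> import torch
# >>> out = norm_normalize(torch.randn(1, 4, 8, 8))
# >>> out.shape
# torch.Size([1, 4, 8, 8])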
# Path: libs/ControlNet-v1-1-nightly/annotator/normalbae/models/baseline.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from .submodules.submodules import UpSampleBN, norm_normalize
# This is the baseline encoder-decoder we used in the ablation study
class NNET(nn.Module):
def __init__(self, args=None):
super(NNET, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder(num_classes=4)
def forward(self, x, **kwargs):
out = self.decoder(self.encoder(x), **kwargs)
# Bilinearly upsample the output to match the input resolution
up_out = F.interpolate(
out,
size=[x.size(2), x.size(3)],
mode='bilinear',
align_corners=False)
# L2-normalize the first three channels / ensure positive value for concentration parameters (kappa)
up_out = norm_normalize(up_out)
return up_out
def get_1x_lr_params(self): # lr/10 learning rate
return self.encoder.parameters()
def get_10x_lr_params(self): # lr learning rate
modules = [self.decoder]
for m in modules:
yield from m.parameters()
# Encoder
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
basemodel_name = 'tf_efficientnet_b5_ap'
basemodel = torch.hub.load(
'rwightman/gen-efficientnet-pytorch',
basemodel_name,
pretrained=True)
# Remove last layer
basemodel.global_pool = nn.Identity()
basemodel.classifier = nn.Identity()
self.original_model = basemodel
def forward(self, x):
features = [x]
for k, v in self.original_model._modules.items():
if (k == 'blocks'):
for ki, vi in v._modules.items():
features.append(vi(features[-1]))
else:
features.append(v(features[-1]))
return features
# Decoder (no pixel-wise MLP, no uncertainty-guided sampling)
class Decoder(nn.Module):
def __init__(self, num_classes=4):
super(Decoder, self).__init__()
self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
| self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: daswer123/xtts-webui
# Path: scripts/resemble_enhance/inference.py
def inference(model, dwav, sr, device, chunk_seconds: float = 30.0, overlap_seconds: float = 1.0):
remove_weight_norm_recursively(model)
hp: HParams = model.hp
dwav = resample(
dwav,
orig_freq=sr,
new_freq=hp.wav_rate,
lowpass_filter_width=64,
rolloff=0.9475937167399596,
resampling_method="sinc_interp_kaiser",
beta=14.769656459379492,
)
del sr # We are now using hp.wav_rate as the sampling rate
sr = hp.wav_rate
if torch.cuda.is_available():
torch.cuda.synchronize()
start_time = time.perf_counter()
chunk_length = int(sr * chunk_seconds)
overlap_length = int(sr * overlap_seconds)
hop_length = chunk_length - overlap_length
chunks = []
for start in trange(0, dwav.shape[-1], hop_length):
new_chunk = inference_chunk(model, dwav[start : start + chunk_length], sr, device)
chunks.append(new_chunk)
# Delete the processed segment to free up memory
# del new_chunk
# if torch.cuda.is_available():
# torch.cuda.empty_cache()
# Force garbage collection at this point (optional and may slow down processing)
# gc.collect()
hwav = merge_chunks(chunks, chunk_length, hop_length, sr=sr,length=dwav.shape[-1])
# Clean up chunks to free memory after merging
del chunks[:]
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect() # Explicitly call garbage collector again
elapsed_time = time.perf_counter() - start_time
logger.info(f"Elapsed time: {elapsed_time:.3f} s, {hwav.shape[-1] / elapsed_time / 1000:.3f} kHz")
return hwav, sr
# Path: scripts/resemble_enhance/denoiser/train.py
def load_G(run_dir: Path, hp: HParams | None = None, training=True):
def save_wav(path: Path, wav: Tensor, rate: int):
def main():
def feed_G(engine: Engine, batch: dict[str, Tensor]):
def eval_fn(engine: Engine, eval_dir, n_saved=10):
# Path: scripts/resemble_enhance/denoiser/inference.py
import logging
import torch
from functools import cache
from ..inference import inference
from .train import Denoiser, HParams
logger = logging.getLogger(__name__)
@cache
def load_denoiser(run_dir, device):
if run_dir is None:
| return Denoiser(HParams()) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: FrozenBurning/PrimDiffusion
# Path: dva/attr_dict.py
class AttrDict:
def __init__(self, entries):
self.add_entries_(entries)
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
return self.__dict__.__delitem__(key)
def __contains__(self, key):
return key in self.__dict__
def __repr__(self):
return self.__dict__.__repr__()
def __getattr__(self, attr):
if attr.startswith("__"):
return self.__getattribute__(attr)
return self.__dict__[attr]
def items(self):
return self.__dict__.items()
def __iter__(self):
return iter(self.items())
def add_entries_(self, entries, overwrite=True):
for key, value in entries.items():
if key not in self.__dict__:
if isinstance(value, dict):
self.__dict__[key] = AttrDict(value)
else:
self.__dict__[key] = value
else:
if isinstance(value, dict):
self.__dict__[key].add_entries_(entries=value, overwrite=overwrite)
elif overwrite or self.__dict__[key] is None:
self.__dict__[key] = value
def serialize(self):
return json.dumps(self, default=self.obj_to_dict, indent=4)
def obj_to_dict(self, obj):
return obj.__dict__
def get(self, key, default=None):
return self.__dict__.get(key, default)
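# A minimal usage sketch (hypothetical config): nested dicts become nested AttrDicts and are
# reachable through attribute access.
# >>> cfg = AttrDict({"model": {"lr": 1e-3}, "seed": 42})
# >>> cfg.model.lr, cfg["seed"], cfg.get("missing", None)
# (0.001, 42, None)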
# Path: dva/geom.py
def compute_v2uv(n_verts, vi, vti, n_max=4):
"""Computes mapping from vertex indices to texture indices.
Args:
vi: [F, 3], triangles
vti: [F, 3], texture triangles
n_max: int, max number of texture locations
Returns:
[n_verts, n_max], texture indices
"""
v2uv_dict = {}
for i_v, i_uv in zip(vi.reshape(-1), vti.reshape(-1)):
v2uv_dict.setdefault(i_v, set()).add(i_uv)
assert len(v2uv_dict) == n_verts
v2uv = np.zeros((n_verts, n_max), dtype=np.int32)
for i in range(n_verts):
vals = sorted(list(v2uv_dict[i]))
v2uv[i, :] = vals[0]
v2uv[i, : len(vals)] = np.array(vals)
return v2uv
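# A minimal usage sketch (hypothetical one-triangle mesh): each vertex maps to its single UV index,
# padded out to n_max columns.
# >>> vi = np.array([[0, 1, 2]]); vti = np.array([[0, 1, 2]])
# >>> compute_v2uv(3, vi, vti, n_max=2)
# array([[0, 0],
#        [1, 1],
#        [2, 2]], dtype=int32)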
# Path: dva/geom.py
def compute_neighbours(n_verts, vi, n_max_values=10):
"""Computes first-ring neighbours given vertices and faces."""
n_vi = vi.shape[0]
adj = {i: set() for i in range(n_verts)}
for i in range(n_vi):
for idx in vi[i]:
adj[idx] |= set(vi[i]) - set([idx])
nbs_idxs = np.tile(np.arange(n_verts)[:, np.newaxis], (1, n_max_values))
nbs_weights = np.zeros((n_verts, n_max_values), dtype=np.float32)
for idx in range(n_verts):
n_values = min(len(adj[idx]), n_max_values)
nbs_idxs[idx, :n_values] = np.array(list(adj[idx]))[:n_values]
nbs_weights[idx, :n_values] = -1.0 / n_values
return nbs_idxs, nbs_weights
# Path: dva/io.py
import json
import cv2
import numpy as np
import copy
import importlib
import pickle
import os
from typing import Any, Dict
from dva.attr_dict import AttrDict
from dva.geom import compute_v2uv, compute_neighbours
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def load_module(module_name, class_name=None, silent: bool = False):
module = importlib.import_module(module_name)
return getattr(module, class_name) if class_name else module
def load_class(class_name):
return load_module(*class_name.rsplit(".", 1))
def load_from_config(config, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs)
def load_opencv_calib(extrin_path, intrin_path):
cameras = {}
fse = cv2.FileStorage()
fse.open(extrin_path, cv2.FileStorage_READ)
fsi = cv2.FileStorage()
fsi.open(intrin_path, cv2.FileStorage_READ)
names = [
fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size())
]
for camera in names:
rot = fse.getNode(f"R_{camera}").mat()
R = fse.getNode(f"Rot_{camera}").mat()
T = fse.getNode(f"T_{camera}").mat()
R_pred = cv2.Rodrigues(rot)[0]
assert np.all(np.isclose(R_pred, R))
K = fsi.getNode(f"K_{camera}").mat()
cameras[camera] = {
"Rt": np.concatenate([R, T], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
return cameras
def load_smpl_params(params):
return {
k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id"
}
def load_smpl_topology(data_struct) -> Dict[str, Any]:
# TODO: compute_
topology = {
"vi": data_struct["f"].astype(np.int64),
"vti": data_struct["ft"].astype(np.int64),
"vt": data_struct["vt"].astype(np.float32),
"n_verts": data_struct["v_template"].shape[0],
}
| topology["v2uv"] = compute_v2uv( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Nearcyan/papers.day
# Path: backend/models.py
class ArxivPaper(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
arxiv_id = models.CharField(max_length=20, unique=True)
# fields scraped from the paper page:
title = models.CharField(max_length=255, db_index=True)
abstract = models.TextField(db_index=True)
authors = models.ManyToManyField(Author)
primary_subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=True, blank=True)
subjects = models.ManyToManyField(Subject, related_name="papers")
comment = models.TextField(null=True, blank=True)
doi = models.CharField(max_length=255, null=True, blank=True)
journal_ref = models.CharField(max_length=255, null=True, blank=True)
publication_date = models.DateField()
# fields we create
summary = models.TextField(db_index=True)
total_author_citations = models.IntegerField(default=0, db_index=True)
citations = models.IntegerField(default=0, db_index=True)
# file fields
pdf = models.FileField(upload_to="pdfs", null=True, blank=True)
screenshot = models.ImageField(upload_to="screenshots", null=True, blank=True)
source_tar = models.FileField(upload_to="tar_sources", null=True, blank=True)
images = models.ManyToManyField(PaperImage, related_name="paper_images")
sources = models.ManyToManyField(PaperSource, related_name="paper_sources")
def abstract_link(self) -> str:
return f"https://arxiv.org/abs/{self.arxiv_id}"
def pdf_link(self) -> str:
return f"https://arxiv.org/pdf/{self.arxiv_id}.pdf"
def source_link(self) -> str:
return f"https://arxiv.org/e-print/{self.arxiv_id}"
def __str__(self):
return self.title
# Path: backend/models.py
class Author(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=255, db_index=True)
affiliation = models.CharField(max_length=255, null=True, blank=True, db_index=True)
email = models.EmailField(null=True, blank=True)
email_domain = models.CharField(max_length=255, null=True, blank=True, db_index=True)
citations = models.IntegerField(default=0, db_index=True)
scholar_id = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return self.name
# Path: backend/models.py
class Subject(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
short_name = models.CharField(max_length=255)
full_name = models.CharField(max_length=255)
def __str__(self):
return self.full_name
# Path: backend/models.py
class PaperImage(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
image = models.ImageField(upload_to="images")
paper = models.ForeignKey("ArxivPaper", on_delete=models.CASCADE)
# Path: backend/models.py
class PaperSource(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
content = models.TextField()
paper = models.ForeignKey("ArxivPaper", on_delete=models.CASCADE)
# Path: backend/admin.py
from django.contrib import admin
from .models import ArxivPaper, Author, Subject, PaperImage, PaperSource
class ArxivPaperAdmin(admin.ModelAdmin):
list_display = ('title', 'citations', 'total_author_citations', 'summary', 'publication_date', 'arxiv_id',
'created_at')
search_fields = ('title', 'abstract', 'arxiv_id')
readonly_fields = ('created_at', 'modified_at')
ordering = ('-publication_date',)
list_filter = ('publication_date', 'created_at', 'citations', 'total_author_citations')
class SubjectAdmin(admin.ModelAdmin):
list_display = ('short_name', 'full_name')
search_fields = ('short_name', 'full_name')
ordering = ('short_name',)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
search_fields = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
ordering = ('name',)
class PaperImageAdmin(admin.ModelAdmin):
list_display = ('image', 'paper')
search_fields = ('image', 'paper')
ordering = ('image',)
class PaperSourceAdmin(admin.ModelAdmin):
list_display = ('paper',)
search_fields = ('paper',)
| admin.site.register(ArxivPaper, ArxivPaperAdmin) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: LSimon95/megatts2
# Path: models/megatts2.py
class MegaVQ(nn.Module):
def __init__(
self,
mrte: MRTE,
vqpe: VQProsodyEncoder,
decoder: ConvNet,
):
super(MegaVQ, self).__init__()
self.mrte = mrte
self.vqpe = vqpe
self.decoder = decoder
def forward(
self,
duration_tokens: torch.Tensor, # (B, T)
text: torch.Tensor, # (B, T)
text_lens: torch.Tensor, # (B,)
mel_mrte: torch.Tensor, # (B, T, mel_bins)
mel_lens_mrte: torch.Tensor, # (B,)
mel_vqpe: torch.Tensor, # (B, T, mel_bins)
):
zq, commit_loss, vq_loss = self.vqpe(mel_vqpe)
x = self.mrte(duration_tokens, text, text_lens,
mel_mrte, mel_lens_mrte)
x = torch.cat([x, zq], dim=-1)
x = rearrange(x, 'B T D -> B D T')
x = self.decoder(x)
x = rearrange(x, 'B D T -> B T D')
return x, commit_loss, vq_loss
# Path: modules/dscrm.py
class Discriminator(nn.Module):
def __init__(self, time_lengths=[32, 64, 128], freq_length=80, kernel=(3, 3), c_in=1,
hidden_size=128):
super(Discriminator, self).__init__()
self.time_lengths = time_lengths
self.discriminator = MultiWindowDiscriminator(
freq_length=freq_length,
time_lengths=time_lengths,
kernel=kernel,
c_in=c_in, hidden_size=hidden_size
)
def forward(self, x, start_frames_wins=None):
"""
:param x: [B, T, 80]
:param return_y_only:
:return:
"""
if len(x.shape) == 3:
x = x[:, None, :, :] # [B,1,T,80]
x_len = x.sum([1, -1]).ne(0).int().sum([-1])
ret = {'y_c': None, 'y': None}
ret['y'], start_frames_wins, ret['h'] = self.discriminator(
x, x_len, start_frames_wins=start_frames_wins)
ret['start_frames_wins'] = start_frames_wins
return ret
# Path: utils/utils.py
def plot_spectrogram_to_numpy(spec_target: np.ndarray, spec_output: np.ndarray) -> np.ndarray:
"""
Plot a spectrogram and convert it to a numpy array.
Args:
spectrogram (ndarray): Spectrogram data.
Returns:
ndarray: Numpy array representing the plotted spectrogram.
"""
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
ax1.set_title("Target")
im = ax1.imshow(spec_target.astype(np.float32), aspect="auto", origin="lower", interpolation="none")
plt.colorbar(im, ax=ax1)
plt.xlabel("Frames")
plt.ylabel("Channels")
ax2.set_title("Output")
im = ax2.imshow(spec_output.astype(np.float32), aspect="auto", origin="lower", interpolation="none")
plt.colorbar(im, ax=ax2)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
# Path: models/trainer.py
import lightning.pytorch as pl
import torch
import torchaudio
import torch.nn.functional as F
import transformers
import numpy as np
import math
from .megatts2 import MegaVQ
from modules.dscrm import Discriminator
from utils.utils import plot_spectrogram_to_numpy
class MegaGANTrainer(pl.LightningModule):
def __init__(
self,
| G: MegaVQ, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: wanghao-cst/Omni-VideoAssistant
# Path: llava/constants.py
CONTROLLER_HEART_BEAT_EXPIRATION = 30
# Path: llava/utils.py
def build_logger(logger_name, logger_filename):
def __init__(self, logger, log_level=logging.INFO):
def __getattr__(self, attr):
def write(self, buf):
def flush(self):
def disable_torch_init():
def violates_moderation(text):
def pretty_print_semaphore(semaphore):
class StreamToLogger(object):
# Path: llava/serve/controller.py
import argparse
import asyncio
import dataclasses
import json
import logging
import time
import threading
import numpy as np
import requests
import uvicorn
from enum import Enum, auto
from typing import List, Union
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from llava.utils import build_logger, server_error_msg
if not worker_status:
return False
self.worker_info[worker_name] = WorkerInfo(
worker_status["model_names"], worker_status["speed"], worker_status["queue_length"],
check_heart_beat, time.time())
logger.info(f"Register done: {worker_name}, {worker_status}")
return True
def get_worker_status(self, worker_name: str):
try:
r = requests.post(worker_name + "/worker_get_status", timeout=5)
except requests.exceptions.RequestException as e:
logger.error(f"Get status fails: {worker_name}, {e}")
return None
if r.status_code != 200:
logger.error(f"Get status fails: {worker_name}, {r}")
return None
return r.json()
def remove_worker(self, worker_name: str):
del self.worker_info[worker_name]
def refresh_all_workers(self):
old_info = dict(self.worker_info)
self.worker_info = {}
for w_name, w_info in old_info.items():
if not self.register_worker(w_name, w_info.check_heart_beat, None):
logger.info(f"Remove stale worker: {w_name}")
def list_models(self):
model_names = set()
for w_name, w_info in self.worker_info.items():
model_names.update(w_info.model_names)
return list(model_names)
def get_worker_address(self, model_name: str):
if self.dispatch_method == DispatchMethod.LOTTERY:
worker_names = []
worker_speeds = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_speeds.append(w_info.speed)
worker_speeds = np.array(worker_speeds, dtype=np.float32)
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
if True: # Directly return address
pt = np.random.choice(np.arange(len(worker_names)),
p=worker_speeds)
worker_name = worker_names[pt]
return worker_name
# Check status before returning
while True:
pt = np.random.choice(np.arange(len(worker_names)),
p=worker_speeds)
worker_name = worker_names[pt]
if self.get_worker_status(worker_name):
break
else:
self.remove_worker(worker_name)
worker_speeds[pt] = 0
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
continue
return worker_name
elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
worker_names = []
worker_qlen = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_qlen.append(w_info.queue_length / w_info.speed)
if len(worker_names) == 0:
return ""
min_index = np.argmin(worker_qlen)
w_name = worker_names[min_index]
self.worker_info[w_name].queue_length += 1
logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
return w_name
else:
raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")
def receive_heart_beat(self, worker_name: str, queue_length: int):
if worker_name not in self.worker_info:
logger.info(f"Receive unknown heart beat. {worker_name}")
return False
self.worker_info[worker_name].queue_length = queue_length
self.worker_info[worker_name].last_heart_beat = time.time()
logger.info(f"Receive heart beat. {worker_name}")
return True
def remove_stable_workers_by_expiration(self):
expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
to_delete = []
for worker_name, w_info in self.worker_info.items():
if w_info.check_heart_beat and w_info.last_heart_beat < expire:
to_delete.append(worker_name)
for worker_name in to_delete:
self.remove_worker(worker_name)
def worker_api_generate_stream(self, params):
worker_addr = self.get_worker_address(params["model"])
if not worker_addr:
logger.info(f"no worker: {params['model']}")
ret = {
| "text": server_error_msg, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RobertCsordas/moe_attention
# Path: layers/transformer/multi_head_attention.py
class MultiHeadAttention(AttentionMergeMixin, AbsPosAttentionBase):
def __init__(self, state_size: int, n_heads: int, dropout: float = 0.1, input_size: Optional[int] = None,
out_size: Optional[int] = None):
super(AbsPosAttentionBase, self).__init__(state_size, n_heads, dropout)
self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
self.data_to_q = torch.nn.Linear(input_size or state_size, n_heads * self.projection_size, bias=False)
super(MultiHeadAttention, self).__init__(out_size)
self.reset_parameters()
def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
need_weights: bool = False):
# Input and output shape: [n_batch, n_steps, data_size]
k, v = self.transform_data(attend_to, self.data_to_kv, 2)
q, = self.transform_data(curr_state, self.data_to_q, 1)
data, scores = self.merged_attention(curr_state.shape[0], q.shape[1], mask, q, k, v)
if need_weights:
return data, scores
else:
return data
def reset_parameters(self):
# super().reset_parameters()
torch.nn.init.xavier_uniform_(self.data_to_q.weight)
torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
# Path: layers/transformer/multi_head_attention.py
class AttentionMask:
src_length_mask: Optional[torch.Tensor]
position_mask: Optional[torch.Tensor]
# Path: layers/transformer/transformer.py
import torch
import torch.nn
import torch.nn.functional as F
from .multi_head_attention import MultiHeadAttention, AttentionMask
from typing import Optional, Callable, Dict, Type, Sequence, Union
from dataclasses import dataclass
# This file is based on PyTorch's internal implementation
ActivationFunction = Callable[[torch.Tensor], torch.Tensor]
class TransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout)
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
| def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: riccardomusmeci/mlx-llm
# Path: src/mlx_llm/model/phi2.py
def phi2() -> Phi2:
return Phi2(
dim=2560,
vocab_size=51200,
n_heads=32,
n_layers=32,
rotary_dim=32
)
# Path: src/mlx_llm/model/transformer.py
def llama_2_7B_chat() -> Transformer:
return Transformer(
dim=4096,
hidden_dim=11008,
vocab_size=32000,
n_layers=32,
n_heads=32,
n_kv_heads=32,
head_dim=128,
norm_eps=1e-5
)
# Path: src/mlx_llm/model/transformer.py
def tiny_llama_chat_v06() -> Transformer:
return Transformer(
dim=2048,
hidden_dim=5632,
n_heads=32,
n_kv_heads=4,
n_layers=22,
vocab_size=32000,
head_dim=64, # 2048 / 32,
norm_eps=1e-5,
rope_traditional=False
)
# Path: src/mlx_llm/model/transformer.py
def openhermes_25_mistral_7B() -> Transformer:
return Transformer(
dim=4096,
hidden_dim=14336,
vocab_size=32002,
n_layers=32,
n_heads=32,
n_kv_heads=8,
head_dim=128,
norm_eps=1e-5
)
# Path: src/mlx_llm/model/transformer.py
def mistral_7B_instruct_v02() -> Transformer:
return Transformer(
dim=4096,
hidden_dim=14336,
vocab_size=32000,
n_layers=32,
n_heads=32,
n_kv_heads=8,
head_dim=128,
norm_eps=1e-5
)
# Path: src/mlx_llm/model/transformer.py
def e5_mistral_7b_instruct() -> Transformer:
return Transformer(
dim=4096,
hidden_dim=14336,
vocab_size=32000,
n_layers=32,
n_heads=32,
n_kv_heads=8,
head_dim=128,
norm_eps=1e-5
)
# Path: src/mlx_llm/model/_registry.py
from .phi2 import phi2
from .transformer import (
llama_2_7B_chat,
tiny_llama_chat_v06,
openhermes_25_mistral_7B,
# mistral_7B_instruct_v01,
mistral_7B_instruct_v02,
e5_mistral_7b_instruct
)
MODEL_ENTRYPOINTS = {
"Phi2": phi2,
"LLaMA-2-7B-chat": llama_2_7B_chat,
"TinyLlama-1.1B-Chat-v0.6": tiny_llama_chat_v06,
# "Mistral-7B-Instruct-v0.1": mistral_7B_instruct_v01,
| "Mistral-7B-Instruct-v0.2": mistral_7B_instruct_v02, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xetdata/xetcache
# Path: xetcache/util.py
def hash_anything(x):
return hashlib.sha256(pickle.dumps(x)).hexdigest()
# Path: xetcache/util.py
def probe_memo(memopath, inputhashstr, key=None):
"""
Locate the memo from the provided input.
"""
memo_file = inputhashstr + '.pickle'
if key is None:
full_memo_file = os.path.join(memopath, inputhashstr + '.pickle')
else:
key = str(key)
full_memo_file = os.path.join(memopath, key, inputhashstr + '.pickle')
if full_memo_file.startswith("xet://"):
try:
openfile = fsspec.open(full_memo_file, 'rb')
fbytestr = None
with openfile as f:
print(f"Loading from {memo_file}")
# reading from a string first will avoid potential tiny
# reads that are extraordinarily slow
fbytestr = f.read()
result = pickle.loads(fbytestr)
return result
except Exception as e:
if str("404 Not Found") in str(e):
return None
print(f'Failed to load: {e}')
return None
elif os.path.exists(full_memo_file):
if file_is_pointer_file(full_memo_file):
materialized = materialize_pointer_file(full_memo_file)
else:
materialized = True
if materialized:
with open(full_memo_file, 'rb') as f:
print(f"Loading from {memo_file}")
result = pickle.load(f)
return result
return None
# Path: xetcache/util.py
def store_memo(memopath, inputhashstr, store, key):
"""
Locate the memo from the provided input.
"""
memo_file = inputhashstr + '.pickle'
if key is None:
full_memo_file = os.path.join(memopath, inputhashstr + '.pickle')
else:
key = str(key)
full_memo_file = os.path.join(memopath, key, inputhashstr + '.pickle')
memopath = os.path.join(memopath, key)
if full_memo_file.startswith("xet://"):
fs = fsspec.filesystem("xet")
with fs.transaction:
openfile = fsspec.open(full_memo_file, 'wb')
with openfile as f:
print(f"Writing to {memo_file}")
pickle.dump(store, f)
else:
os.makedirs(memopath, exist_ok=True)
with open(full_memo_file, 'wb') as f:
print(f"Writing to {memo_file}")
pickle.dump(store, f)
return None
# Path: xetcache/config.py
def get_memo_path():
"""
Reads the current memo path
"""
return _MEMOPATH
# Path: xetcache/config.py
def get_runtime_threshold():
"""
Reads the current runtime threshold in seconds.
Only functions or cells which run longer than this will be cached.
"""
return _RUNTIME_THRESHOLD_SEC
# Path: xetcache/xetmemo_kernel_extension.py
import os
import time
from .util import hash_anything, probe_memo, store_memo
from .config import get_memo_path, get_runtime_threshold
from IPython.core.magic import Magics, magics_class, cell_magic
@magics_class
class XMemoMagics(Magics):
"""Memoization for data science tasks
%load_ext xetcache
to load the extension
"""
def __init__(self, *args, **kwargs):
print(self.xetmemo.__doc__)
memopath = get_memo_path()
print(f"Memoizing to {memopath}")
super().__init__(*args, **kwargs)
@cell_magic
def xetmemo(self, line, cell):
'''
Usage:
%%xetmemo input=v1,v2 output=v3,v4
Caches the specified output variables each time it is called.
If called later with the same inputs, the cached value is returned
and not reevaluated. This is persistent across Python runs.
Any content changes to the input variables, or cell code will
force reevaluation of the cell. Otherwise the outputs will simply be
retrieved from the memo.
This memo is persistent across Python processes and, if XetHub is used
(see `xetcache.set_xet_project`), can be shared with others.
For performance reasons, only functions which take more than 3
seconds (configurable from config.set_runtime_threshold) will be
cached. "always=True" can be added to the xetmemo arguments to
ignore the runtime and to always cache
%%xetmemo input=v1,v2 output=v3,v4 always=True
Note that inputs can be anything picklable including functions.
A key parameter can be added to group the stored objects together.
Objects stored with one key will not be retrievable with a different
key
%%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1
Also see the `xetcache.xetmemo` decorator for a version that can be
used as a function decorator
'''
# parse the argument list
args = line.strip().split(' ')
inputvars = []
outputvars = []
ip = self.shell
always = False
key = None
for arg in args:
k, v = arg.split('=')
if k == 'input':
inputvars = [x.strip() for x in v.split(',')]
elif k == 'output':
outputvars = [x.strip() for x in v.split(',')]
elif k == 'always':
always = (v.strip() == 'True')
elif k == 'key':
key = v.strip()
else:
raise RuntimeError(f'Unexpected xmemo key type {k}')
# we hash the xetmemo line, and the contents of the cell
# and all the variables in the input line
| inputhashes = [hash_anything(line), hash_anything(cell)] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: open-compass/T-Eval
# Path: teval/utils/format_load.py
def format_load(raw_data: str, start_character: str = '', end_character: str = ''):
"""Format the raw data into the format that can be evaluated.
Args:
raw_data (str): The raw data.
start_character (str, optional): The start character. Defaults to '', if using it, the string will be sliced from the first start_character.
end_character (str, optional): The end character. Defaults to '', if using it, the string will be sliced to the last end_character.
Returns:
str: The formatted data.
"""
if type(raw_data) != str:
# the data has been evaluated
return raw_data
if "```json" in raw_data:
raw_data = raw_data[raw_data.find("```json") + len("```json"):]
raw_data = raw_data.strip("`")
if start_character != '':
raw_data = raw_data[raw_data.find(start_character):]
if end_character != '':
raw_data = raw_data[:raw_data.rfind(end_character) + len(end_character)]
successful_parse = False
try:
data = ast.literal_eval(raw_data)
successful_parse = True
except Exception as e:
pass
try:
if not successful_parse:
data = json.loads(raw_data)
successful_parse = True
except Exception as e:
pass
try:
if not successful_parse:
data = json.loads(raw_data.replace("\'", "\""))
successful_parse = True
except Exception as e:
pass
if not successful_parse:
raise Exception("Cannot parse raw data")
return data
# Path: teval/schema.py
class ResponseDataSample:
"""
Args:
template(str): Format string with keyword-only arguments. For
example '{who} like {what}'
pred(Any): Parsed data from LLM generating response.
gt(Any): Ground truth data
meta_data(dict, optional): Meta information will be used to evaluate
LLM's response
"""
template: str
pred: Any
gt: Any
meta_data: dict = None
# Path: teval/evaluators/planning_evaluator.py
from collections import defaultdict
from numpy import mean
from mmengine import load
from teval.utils.format_load import format_load
from tqdm import tqdm
from teval.schema import ResponseDataSample
from sentence_transformers import SentenceTransformer, util
import json
import itertools
import networkx as nx
import numpy as np
import copy
import json
import re
# import evaluate
class PlanningEvaluator:
"""Planning Evaluation
Args:
dataset_path(str): File path of evaluation dataset
name_weight(float): the weight of action_name in bert_score match, default = 0.9
args_weight(float): the weight of action_args in bert_score match, default = 0.1
match_threshold(float): the threshold of matching
match_strategy(str): matching method, can choose 'bertscore' or 'permutation'
bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
"""
def __init__(
self,
dataset_path: str,
name_weight = 0.75,
args_weight = 0.25,
match_threshold = 0.7,
match_strategy: str = 'bertscore', # ["bertscore", "permutation"]
bert_score_model: str = "all-mpnet-base-v2", # ['thenlper/gte-large-zh', 'all-mpnet-base-v2']
default_prompt_type: str = 'json', # ["json", "ReWOO"]
**kwargs,
) -> None:
self.bert_score_model = bert_score_model
print(bert_score_model)
self.dataset_path = dataset_path
self.name_weight = name_weight
self.args_weight = args_weight
self.match_threshold = match_threshold
self.default_prompt_type = default_prompt_type # ["json", "ReWOO"]
assert match_strategy in ["bertscore", "permutation"], f"match strategy must in [\"bertscore\", \"permutation\"], but get {match_strategy}"
self.match_strategy = match_strategy
self.valid_data_count = None
self.sentence_model = SentenceTransformer(self.bert_score_model)
def _load_dataset(self):
self.dataset = []
dataset = load(self.dataset_path)
total_error = 0
total_count = 0
for key in dataset.keys():
datum = dataset[key]
data_sample, error = self._process_response(datum)
total_error += error
total_count += 1
self.dataset.append(
dict(response_data_sample=data_sample))
self.num_samples = len(self.dataset)
print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
self.valid_data_count = total_count - total_error
def format_load(self, data):
r'''
ensure evaluator can work correctly under any data input
'''
try:
json_format = format_load(data, start_character='[', end_character=']')
except Exception as e:
return []
if type(json_format) != list:
return []
for i in range(len(json_format)):
try:
json_format[i] = {
'name': str(json_format[i]['name']),
'id': int(json_format[i]['id']),
'args': str(json_format[i]['args'])
}
except Exception as e:
return []
return json_format
def _process_response(
self,
datum,
| ) -> ResponseDataSample: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rabilrbl/gemini-pro-bot
# Path: gemini_pro_bot/llm.py
SAFETY_SETTINGS = {
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
}
# Path: gemini_pro_bot/html_format.py
def format_message(text: str) -> str:
"""Format the given message text from markdown to HTML.
Escapes HTML characters, applies link, code, and other rich text formatting,
and returns the formatted HTML string.
Args:
message (str): The plain text message to format.
Returns:
str: The formatted HTML string.
"""
formatted_text = escape_html(text)
formatted_text = apply_exclude_code(formatted_text)
formatted_text = apply_code(formatted_text)
return formatted_text
# Path: gemini_pro_bot/handlers.py
import asyncio
import PIL.Image as load_image
from gemini_pro_bot.llm import model, img_model
from google.generativeai.types.generation_types import (
StopCandidateException,
BlockedPromptException,
)
from telegram import Update
from telegram.ext import (
ContextTypes,
)
from telegram.error import NetworkError, BadRequest
from telegram.constants import ChatAction, ParseMode
from gemini_pro_bot.html_format import format_message
from io import BytesIO
def new_chat(context: ContextTypes.DEFAULT_TYPE) -> None:
context.chat_data["chat"] = model.start_chat()
async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""Send a message when the command /start is issued."""
user = update.effective_user
await update.message.reply_html(
f"Hi {user.mention_html()}!\n\nStart sending messages with me to generate a response.\n\nSend /new to start a new chat session.",
# reply_markup=ForceReply(selective=True),
)
async def help_command(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""Send a message when the command /help is issued."""
help_text = """
Basic commands:
/start - Start the bot
/help - Get help. Shows this message
Chat commands:
/new - Start a new chat session (model will forget previously generated messages)
Send a message to the bot to generate a response.
"""
await update.message.reply_text(help_text)
async def newchat_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Start a new chat session."""
init_msg = await update.message.reply_text(
text="Starting new chat session...",
reply_to_message_id=update.message.message_id,
)
new_chat(context)
await init_msg.edit_text("New chat session started.")
# Define the function that will handle incoming messages
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Handles incoming text messages from users.
Checks if a chat session exists for the user, initializes a new session if not.
Sends the user's message to the chat session to generate a response.
Streams the response back to the user, handling any errors.
"""
if context.chat_data.get("chat") is None:
new_chat(context)
text = update.message.text
init_msg = await update.message.reply_text(
text="Generating...", reply_to_message_id=update.message.message_id
)
await update.message.chat.send_action(ChatAction.TYPING)
# Generate a response using the text-generation pipeline
chat = context.chat_data.get("chat") # Get the chat session for this chat
response = None
try:
response = await chat.send_message_async(
text, stream=True
) # Generate a response
except StopCandidateException as sce:
print("Prompt: ", text, " was stopped. User: ", update.message.from_user)
print(sce)
await init_msg.edit_text("The model unexpectedly stopped generating.")
chat.rewind() # Rewind the chat session to prevent the bot from getting stuck
return
except BlockedPromptException as bpe:
print("Prompt: ", text, " was blocked. User: ", update.message.from_user)
print(bpe)
await init_msg.edit_text("Blocked due to safety concerns.")
if response:
# Resolve the response to prevent the chat session from getting stuck
await response.resolve()
return
full_plain_message = ""
# Stream the responses
async for chunk in response:
try:
if chunk.text:
full_plain_message += chunk.text
| message = format_message(full_plain_message) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nox-410/tvm.tl
# Path: python/tvm/_ffi/registry.py
def register_func(func_name, f=None, override=False):
"""Register global function
Parameters
----------
func_name : str or function
The function name
f : function, optional
The function to be registered.
override: boolean optional
Whether override existing entry.
Returns
-------
fregister : function
Register function if f is not specified.
Examples
--------
The following code registers my_packed_func as global function.
Note that we simply get it back from global function table to invoke
it from python side. However, we can also invoke the same function
from C++ backend, or in the compiled TVM code.
.. code-block:: python
targs = (10, 10.0, "hello")
@tvm.register_func
def my_packed_func(*args):
assert(tuple(args) == targs)
return 10
# Get it out from global function table
f = tvm.get_global_func("my_packed_func")
assert isinstance(f, tvm.PackedFunc)
y = f(*targs)
assert y == 10
"""
if callable(func_name):
f = func_name
func_name = f.__name__
if not isinstance(func_name, str):
raise ValueError("expect string function name")
ioverride = ctypes.c_int(override)
def register(myf):
"""internal register function"""
if not isinstance(myf, PackedFuncBase):
myf = convert_to_tvm_func(myf)
check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride))
return myf
if f:
return register(f)
return register
# Path: python/tvm/target/codegen.py
def target_has_features(cpu_features, target=None):
"""Check CPU features for the target's `-mtriple` and `-mcpu` and `-mattr`.
Parameters
----------
target : Target
The TVM target.
cpu_features : str or Array
CPU Feature(s) to check.
Returns
-------
has_features : bool
True if target has the feature(s).
"""
assert isinstance(target, Target) or target is None
assert isinstance(cpu_features, (Array, list, tuple, str))
has_feats = True
cpu_features = [cpu_features] if isinstance(cpu_features, str) else cpu_features
for feat in cpu_features:
has_feats &= _ffi_api.target_has_feature(feat, target)
return has_feats
# Path: python/tvm/target/x86.py
from .._ffi import register_func
from .codegen import target_has_features
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common x86 related utilities"""
@register_func("tvm.topi.x86.utils.get_simd_32bit_lanes")
def get_simd_32bit_lanes():
"""X86 SIMD optimal vector length lookup.
Parameters
----------
Returns
-------
vec_len : int
The optimal vector length of CPU from the global context target.
"""
vec_len = 4
| if target_has_features(["avx512bw", "avx512f"]): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kakaobrain/honeybee
# Path: tasks/base_dataset.py
class TaskDataset(Dataset):
def build_prompt(self, question, image_prompt="Human: <image>"):
prompt = f"""{SYSTEM}
{image_prompt}
Human: {question}
AI: """
return prompt
def collate_fn(self, examples: list[Example]) -> Batch:
ids = [ex.id for ex in examples]
data = [ex.data for ex in examples]
images = [ex.image for ex in examples]
prompts = [ex.prompt for ex in examples]
inputs = self.processor(images=images, text=prompts)
batch = Batch(ids=ids, inputs=inputs, data=data)
return batch
# Path: tasks/base_dataset.py
class Example:
id: int # results will be sorted by id
image: Image
prompt: str
data: dict # answer and additional data -- will be included in results
# Path: tasks/mme/mme_dataset.py
from pathlib import Path
from PIL import Image
from tasks.base_dataset import TaskDataset, Example
import utils
EVAL_TYPE_DICT = {
"Perception": ["existence", "count", "position", "color", "posters", "celebrity", "scene", "landmark", "artwork", "OCR"],
"Cognition": ["commonsense_reasoning", "numerical_calculation", "text_translation", "code_reasoning"]
}
def load_subset(dir_path):
root = Path(dir_path)
dset_name = root.name
imgpaths = list(root.glob("**/*.jpg")) + list(root.glob("**/*.png"))
imgpaths = sorted(imgpaths)
def get_txtpath(imgpath):
txtpath = imgpath.with_suffix(".txt")
txtname = txtpath.name
if txtpath.exists():
return txtpath
if imgpath.parent.name == "images":
return imgpath.parent.parent / "questions_answers_YN" / txtname
raise ValueError(f"Cannot find txt path from image path `{imgpath}`")
data = []
for imgpath in imgpaths:
txtpath = get_txtpath(imgpath)
with txtpath.open(encoding="utf-8") as f:
for line in f:
q, a = line.strip().split("\t")
data.append((dset_name, imgpath, q, a))
return data
class MMEDataset(TaskDataset):
def __init__(self, root, processor):
root = Path(root)
data = []
for subset in EVAL_TYPE_DICT["Perception"] + EVAL_TYPE_DICT["Cognition"]:
data += load_subset(root / subset)
utils.print_rank_0(f"MME total dataset size = {len(data)}")
assert len(data) == 2374
self.data = data
self.processor = processor
def __len__(self):
return len(self.data)
def __getitem__(self, index):
dset_name, imgpath, question, answer = self.data[index]
prompt = f"Answer the question using a single word or phrase. {question}"
prompt = self.build_prompt(prompt)
imgid = imgpath.name
image = Image.open(imgpath)
data = {
"question": question,
"answer": answer,
"image_path": str(imgpath),
"image_id": imgid,
"dataset_name": dset_name,
}
| ex = Example(index, image, prompt, data) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: NVlabs/RADIO
# Path: radio/eradio_model.py
@register_model
def eradio(pretrained=False, **kwargs):
return fastervit2_large_fullres_ws16(pretrained=pretrained, **kwargs)
# Path: radio/radio_model.py
def create_model_from_args(args) -> nn.Module:
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
# Skip weight initialization unless it's explicitly requested.
weight_init = args.model_kwargs.pop("weight_init", "skip")
model = create_model(
args.model,
pretrained=args.pretrained,
in_chans=in_chans,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint,
weight_init=weight_init,
**args.model_kwargs,
)
assert (
not args.cls_token_per_teacher or args.cpe_max_size is not None
), "CPE must be enabled for multiple CLS tokens!"
if args.cpe_max_size is not None:
enable_cpe(
model,
args.cpe_max_size,
num_cls_tokens=len(args.teachers) if args.cls_token_per_teacher else 1,
register_multiple=args.register_multiple,
)
return model
# Path: radio/radio_model.py
class RADIOModel(nn.Module):
def __init__(
self,
model: nn.Module,
input_conditioner: InputConditioner,
return_summary: bool,
return_spatial_features: bool,
):
super().__init__()
self.model = model
self.input_conditioner = input_conditioner
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
def forward(self, x: torch.Tensor):
x = self.input_conditioner(x)
y = self.model.forward_features(x)
if isinstance(y, (list, tuple)):
summary, all_feat = y
elif isinstance(self.model, VisionTransformer):
patch_gen = getattr(self.model, "patch_generator", None)
if patch_gen is not None:
summary = y[:, : patch_gen.num_cls_tokens].flatten(1)
all_feat = y[:, patch_gen.num_skip :]
elif self.model.global_pool == "avg":
summary = y[:, self.model.num_prefix_tokens :].mean(dim=1)
all_feat = y
else:
summary = y[:, 0]
all_feat = y[:, 1:]
else:
raise ValueError("Unsupported model type")
if self.return_summary and self.return_spatial_features:
return summary, all_feat
elif self.return_summary:
return summary
return all_feat
# Path: radio/input_conditioner.py
def get_default_conditioner():
from timm.data.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
return InputConditioner(
input_scale=1.0,
norm_mean=OPENAI_CLIP_MEAN,
norm_std=OPENAI_CLIP_STD,
)
# Path: radio/input_conditioner.py
class InputConditioner(nn.Module):
def __init__(self,
input_scale: float,
norm_mean: norm_t,
norm_std: norm_t,
dtype: torch.dtype = torch.float32,
):
super().__init__()
self.dtype = dtype
# self.input_scale = input_scale
self.register_buffer("norm_mean", _to_tensor(norm_mean) / input_scale)
self.register_buffer("norm_std", _to_tensor(norm_std) / input_scale)
def forward(self, x: torch.Tensor):
# x = x * self.input_scale
y = (x - self.norm_mean) / self.norm_std
return y.to(self.dtype)
# Path: radio/hf_model.py
from collections import namedtuple
from typing import Optional
from timm.models import VisionTransformer
from transformers import PretrainedConfig, PreTrainedModel
from .eradio_model import eradio
from .radio_model import create_model_from_args
from .radio_model import RADIOModel as RADIOModelBase
from .input_conditioner import get_default_conditioner, InputConditioner
import torch
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for RADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class RADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for RADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = RADIOConfig
def __init__(self, config):
super().__init__(config)
RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
args = RADIOArgs(**config.args)
self.config = config
| model = create_model_from_args(args) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: taikinman/langrila
# Path: src/langrila/base.py
class BaseModule(ABC):
@abstractmethod
def run(self, *args, **kwargs):
raise NotImplementedError
async def arun(self, *args, **kwargs):
raise NotImplementedError
def stream(self, *args, **kwargs):
raise NotImplementedError
async def astream(self, *args, **kwargs):
raise NotImplementedError
def __call__(self, *args, **kwargs):
_async = kwargs.pop("arun", False)
_stream = kwargs.pop("stream", False)
if _async:
if _stream:
return self.astream(*args, **kwargs)
else:
return asyncio.create_task(self.arun(*args, **kwargs))
else:
if _stream:
return self.stream(*args, **kwargs)
else:
return self.run(*args, **kwargs)
# Path: src/langrila/result.py
class RetrievalResult(BaseModel):
ids: list[int | str]
documents: list[str]
metadatas: Optional[list[dict[str, Any]] | list[None]]
similarities: list[float]
usage: Usage
# Path: src/langrila/usage.py
class Usage(BaseModel):
prompt_tokens: int = 0
completion_tokens: int = 0
def __add__(self, other: __class__ | dict | CompletionUsage):
if isinstance(other, dict):
other = Usage(**other)
if hasattr(other, 'prompt_tokens'):
prompt_tokens = self.prompt_tokens + other.prompt_tokens
else:
prompt_tokens = self.prompt_tokens
if hasattr(other, 'completion_tokens'):
completion_tokens = self.completion_tokens + other.completion_tokens
else:
completion_tokens = self.completion_tokens
return Usage(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
)
def __sub__(self, other: __class__ | dict | CompletionUsage):
if isinstance(other, dict):
other = Usage(**other)
if hasattr(other, 'prompt_tokens'):
prompt_tokens = self.prompt_tokens - other.prompt_tokens
else:
prompt_tokens = self.prompt_tokens
if hasattr(other, 'completion_tokens'):
completion_tokens = self.completion_tokens - other.completion_tokens
else:
completion_tokens = self.completion_tokens
return Usage(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
)
@property
def total_tokens(self):
return self.prompt_tokens + self.completion_tokens
@field_validator('prompt_tokens')
def check_prompt_tokens(cls, v):
if v < 0:
raise ValueError('prompt_tokens must be greater or equal to 0')
return v
@field_validator('completion_tokens')
def check_completion_tokens(cls, v):
if v < 0:
raise ValueError('completion_tokens must be greater or equal to 0')
return v
def __repr__(self):
return f'Usage(prompt_tokens={self.prompt_tokens}, completion_tokens={self.completion_tokens}, total_tokens={self.total_tokens})'
# Path: src/langrila/database/chroma.py
import sys
import chromadb
from pathlib import Path
from typing import Optional
from ..base import BaseModule
from ..result import RetrievalResult
from ..usage import Usage
python_version = sys.version_info
# NOTE: Python versions < 3.10 bundle a lower-version sqlite client, so in that case the sqlite module is overridden
# https://docs.trychroma.com/troubleshooting#sqlite
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
class ChromaCollectionModule(BaseModule):
def __init__(
self,
persistence_directory: str,
collection_name: str,
embedder: Optional[BaseModule] = None,
):
self.embedder = embedder
self.persistence_directory = Path(persistence_directory)
self.collection_name = collection_name
def run(
self,
documents: list[str],
metadatas: Optional[list[dict[str, str]]]=None,
embeddings: Optional[list[list[float]]] = None,
) -> None:
if embeddings is None:
if self.embedder is not None:
embeddings = self.embedder(documents).embeddings
else:
raise AttributeError(
"attribute embedder must be the instance of the class inheriting BaseModule."
)
ids = [str(i) for i in range(len(documents))]
client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
# recreation collection
try:
client.delete_collection(name=self.collection_name)
except ValueError:
pass
collection = client.get_or_create_collection(
name=self.collection_name, metadata={"hnsw:space": "cosine"}
)
collection.upsert(ids=ids, embeddings=embeddings, documents=documents, metadatas=metadatas)
def as_retriever(
self, n_results: int = 4, threshold_similarity: float = 0.8, return_only_relevant_docs: bool = False
) -> "ChromaRetrievalModule":
return ChromaRetrievalModule(
embedder=self.embedder,
persistence_directory=self.persistence_directory,
collection_name=self.collection_name,
n_results=n_results,
threshold_similarity=threshold_similarity,
return_only_relevant_docs=return_only_relevant_docs,
)
class ChromaRetrievalModule(BaseModule):
def __init__(
self,
embedder: BaseModule,
persistence_directory: str,
collection_name: str,
n_results: int = 4,
threshold_similarity: float = 0.8,
return_only_relevant_docs: bool = False,
):
assert isinstance(
embedder, BaseModule
), "embedder must be the instance of the class inheriting BaseModule."
self.embedder = embedder
self.n_results = n_results
self.threshold_similarity = threshold_similarity
self.persistence_directory = persistence_directory
self.collection_name = collection_name
self.return_only_relevant_docs = return_only_relevant_docs
self.n_results = n_results
def run(
self,
query: str,
where: Optional[dict] = None,
) -> dict:
query_embed = self.embedder(query)
client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
collection = client.get_collection(name=self.collection_name)
retrieved = collection.query(
query_embeddings=query_embed.embeddings[0], n_results=self.n_results, where=where
)
_results = self.filter_with_distance(retrieved)
results = RetrievalResult(
ids=_results["ids"],
documents=_results["documents"],
metadatas=_results["metadatas"],
similarities=_results["similarities"],
| usage=Usage( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Open-All-Scale-Causal-Engine/OpenASCE
# Path: openasce/inference/learner/dml.py
class DML(_DML, InferenceModel):
def fit(
self,
*,
X: Iterable[np.ndarray],
Y: Iterable[np.ndarray],
T: Iterable[np.ndarray],
**kwargs
):
"""Feed the sample data and train the model used to effect on the samples.
Arguments:
X: Features of the samples.
Y: Outcomes of the samples.
T: Treatments of the samples.
Returns:
"""
def _nuisance_fit(
_self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None
):
assert Z is None, "Cannot accept instrument!"
param = {
"X": X,
"W": W,
"T": T,
"Y": Y,
"model_t": _self._model_t,
"model_y": _self._model_y,
"sample_weight": sample_weight,
"groups": groups,
}
results = self.launch(num=2, param=param, dataset=None)
for r in results:
if "model_t" in r:
_self._model_t = r["model_t"]
elif "model_y" in r:
_self._model_y = r["model_y"]
return _self
_ModelNuisance.fit = _nuisance_fit
super().fit(Y, T, X=X, **kwargs)
def todo(self, idx: int, total_num: int, param: Any, dataset: Iterable) -> Any:
model_t = param.pop("model_t")
model_y = param.pop("model_y")
X, Y, T, W = param["X"], param["Y"], param["T"], param["W"]
sample_weight, groups = param["sample_weight"], param["groups"]
result = {"idx": idx}
if idx == 0:
model_t.fit(
X,
W,
T,
**filter_none_kwargs(sample_weight=sample_weight, groups=groups)
)
result["model_t"] = model_t
elif idx == 1:
model_y.fit(
X,
W,
Y,
**filter_none_kwargs(sample_weight=sample_weight, groups=groups)
)
result["model_y"] = model_y
return result
def estimate(self, *, X: Iterable[np.ndarray]) -> NoReturn:
"""Feed the sample data and estimate the effect on the samples
Arguments:
X: Features of the samples.
Returns:
"""
self._estimate_result = self.const_marginal_effect(X)
# Path: tests/datasets/ihdp_data.py
def get_ihdp_data():
"""
Loads the IHDP dataset, refer to https://raw.githubusercontent.com/AMLab-Amsterdam/CEVAE/master/datasets/IHDP/csv/ihdp_npci_1.csv
"""
col_names = ["treatment", "y_factual", "y_cfactual", "mu0", "mu1"] + [
"x{}".format(i + 1) for i in range(25)
]
csv_path = os.path.join(os.path.dirname(inspect.getfile(get_ihdp_data)), "ihdp.csv")
df = pd.read_csv(csv_path, names=col_names)
logger.info("IHDP dataset loaded.")
return df.iloc[:523], df.iloc[523:]
# Path: openasce/utils/logger.py
GLOBAL_LOGGER_NAME = "openasce-log"
DEFAULT_FORMAT = (
"[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d:%(funcName)s] %(message)s"
)
DEFAULT_FORMATTER = logging.Formatter(DEFAULT_FORMAT)
def init_custom_logger(name):
class openasceLogger(object):
# Path: openasce/inference/learner/dml_test.py
from unittest import TestCase
from econml.sklearn_extensions.linear_model import WeightedLassoCVWrapper
from sklearn.linear_model import LassoCV
from openasce.inference.learner.dml import DML
from tests.datasets.ihdp_data import get_ihdp_data
from openasce.utils.logger import logger
import numpy as np
# Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class TestDML(TestCase):
def setUp(self) -> None:
self.train_data, self.test_data = get_ihdp_data()
np.random.seed(12)
return super().setUp()
def test_dml(self):
np.random.seed(12)
learner = DML(
model_y=WeightedLassoCVWrapper(),
model_t=WeightedLassoCVWrapper(),
model_final=LassoCV(cv=3),
categories=[0, 1],
)
learner.fit(
X=self.train_data[self.train_data.columns[5:]]
.to_numpy()
.astype(np.float32),
Y=self.train_data["y_factual"],
T=self.train_data["treatment"],
)
learner.estimate(
X=self.test_data[self.train_data.columns[5:]].to_numpy().astype(np.float32)
)
avg = np.average(learner.get_result())
| logger.info(f"dml result: {avg}") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: latorc/Wechat-AI-Assistant
# Path: wcf_wrapper.py
class WcfWrapper:
def __init__(self) -> None:
def __del__(self):
def msg_preview_str(self, msg:WxMsg) -> str:
def wxid_to_nickname(self, wxid) -> str:
def wxid_to_wxcode(self, wxid) -> str:
def get_msg(self) -> WxMsg:
def get_msg_text(self, msg:WxMsg) -> str:
def get_content_type(self, msg:WxMsg) -> int:
def get_refer_content(self, msg:WxMsg) -> ChatMsg:
def get_msg_extra(self, msgid:str, sample_extra:str) -> str:
def get_image(self, msgid:str, extra:str) -> str:
def get_video(self, msgid:str, extra:str) -> str:
def send_message(self, chat_msg:ChatMsg, receiver:str, at_list:str="") -> int:
def send_text(self, msg: str, receiver: str, at_list: str = "") -> int:
def send_image(self, file:str, receiver:str) -> int:
def send_file(self, file:str, receiver:str) -> int:
def search_msg(self):
# Path: config.py
class AdminCmd(Enum):
""" 微信机器人管理员命令, 与配置项目名称对应 """
help = auto()
reload_config = auto()
clear_chat = auto()
load_preset = auto()
reset_preset = auto()
list_preset = auto()
chat_id = auto()
@property
def description(self):
""" 返回命令的描述说明 """
texts = {
AdminCmd.help: "显示帮助信息",
AdminCmd.reload_config: "重新载入配置文件",
AdminCmd.clear_chat: "清除当前对话记忆",
AdminCmd.load_preset: "预设名 为当前对话载入预设",
AdminCmd.reset_preset: "为当前对话清除预设",
AdminCmd.list_preset: "列出当前可用预设",
AdminCmd.chat_id: "显示当前对话(群聊或单聊)的id"
}
return texts.get(self, "")
# Path: common.py
class ContentType(Enum):
""" 表示用微信发送的消息的类型"""
text = 1 # 文字
image = 3 # 图片
link = 4 # 链接
file = 6 # 文件
voice = 34 # 语音
video = 43 # 视频
ERROR = 9000 # 错误
UNSUPPORTED = 9001 # 不支持类型
# Path: common.py
class ChatMsg:
""" 代表某种类型的消息, 用于内部数据传递 """
def __init__(self, type:ContentType, content:str) -> None:
""" 初始化
Args:
type (ContentType): 附件类型
content (str): 附件内容
"""
self.type = type
self.content = content
# Path: chatbot.py
import queue
import re
import config
import common
import openai_wrapper
import preset
from typing import Tuple
from wcf_wrapper import WcfWrapper, ContentType
from wcferry import WxMsg
from config import AdminCmd
from common import ContentType, ChatMsg
class Chatbot():
""" 管理微信机器人逻辑. 管理与微信客户端 (如Wechat Ferry) 和 AI 客户端 (如 OpenAI )的交互逻辑 """
def __init__(self, config: config.Config, wcfw: WcfWrapper, oaiw: openai_wrapper.OpenAIWrapper) -> None:
""" 初始化
args:
config (Config): Config对象
wcfw (WcfWrapper): Wechat Ferry Wrapper对象
oaiw (OpenAIWrapper): AI Wrapper对象
"""
self.config = config
self.wcfw = wcfw
self.openai_wrapper = oaiw
self.chat_presets:dict[str, preset.Preset] = {} # 每个对话的预设 {roomid或wxid: 预设}
def start_main_loop(self) -> None:
"""
主循环, 接收并处理微信消息.
该函数阻塞进程.
"""
while self.wcfw.wcf.is_receiving_msg():
try:
msg:WxMsg = self.wcfw.get_msg()
note = f"收到消息 {self.wcfw.msg_preview_str(msg)}"
common.logger().info(note)
except queue.Empty:
continue # 无消息,继续
except Exception as e:
common.logger().error("接收微信消息错误: %s", common.error_trace(e))
try:
self.run_wxmsg(msg)
except Exception as e:
common.logger().error("处理消息错误:%s", common.error_trace(e))
def run_wxmsg(self, msg:WxMsg):
""" 读取并处理一条消息
args:
msg (WxMsg): 消息对象. 群号: msg.roomid, 发送者微信ID: msg.sender, 消息内容: msg.content
"""
content = self._filter_wxmsg(msg)
if content is None:
return
# 确定回复对象
if msg.from_group():
receiver = msg.roomid
if msg.from_self():
at_list = ""
else:
at_list = msg.sender
else: #单聊
receiver = msg.sender
at_list = ""
# 发送者是管理员, 并且是命令时, 处理命令并直接返回
if self.wcfw.wxid_to_wxcode(msg.sender) in self.config.admins:
cmd = self._match_admin_cmd(content)
if cmd:
try:
self.process_admin_cmd(content, receiver, at_list)
except Exception as e:
common.logger().error("执行管理员命令错误: %s",common.error_trace(e))
self.wcfw.send_text(f"执行管理员命令'{content}'发生错误", receiver, at_list)
return
### 调用 AI 处理消息
# 回调函数, 处理 AI 返回消息
| def callback_msg(msg:ChatMsg) -> int:
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tensorsense/faceflow
# Path: lib/data/cfg/local.py
class LocalNaturalDatasetCfg:
name: str
root: str
labels_filename: str = "au.csv"
crops_dir: str = "crops"
aus: List[str] = field(
default_factory=lambda: [
"AU1",
"AU2",
"AU4",
]
)
# Path: lib/data/datamodules/vanilla.py
class AUDataModule(pl.LightningDataModule):
def __init__(self, dataset_cfg: Dict[str, List],
image_size: int = 224,
logits_per_class: int = 2,
train_transforms: A.Compose = None,
val_transforms: A.Compose = None,
batch_size: int = 16,
num_workers: int = 4,
random_state: int = 1337):
"""
Wrapper that abstracts away data handling, like instantiating datasets, setting dataloaders etc.
:param dataset_cfg: dict with {'train', 'val'} keys, each item contains a list of dict configs for datasets used
during the corresponding stage. The config has to include name, root, images and labels paths.
:param image_size: to what size the input is going to be rescaled and cropped
:param train_transforms:
:param val_transforms:
:param batch_size:
:param num_workers:
:param random_state:
"""
super().__init__()
self.dataset_cfg = dataset_cfg
self.image_size = image_size
self.logits_per_class = logits_per_class
self.train_transform = train_transforms
self.val_transform = val_transforms
self.train_dataset = None
self.val_datasets = None
self.random_state = random_state
self.num_workers = num_workers
self.batch_size = batch_size
def fetch_datasets(self, mode="train"):
assert mode in {"train", "val"}
cfg = self.dataset_cfg[mode]
datasets = []
for ds in cfg:
datasets.append(SimpleAUDataset(name=ds.name,
root=ds.root,
crops_dir=ds.crops_dir,
labels_filename=ds.labels_filename,
aus=ds.aus,
transform=self.train_transform if mode in {"train"} else self.val_transform,
logits_per_class=self.logits_per_class))
return datasets
def setup(self, stage: str):
if stage == "fit":
self.train_dataset = ConcatDataset(self.fetch_datasets("train"))
self.val_datasets = self.fetch_datasets("val")
print(f"Train size: {len(self.train_dataset)}")
print(f"Val sizes: {[len(d) for d in self.val_datasets]}")
if stage == "test":
self.val_datasets = self.fetch_datasets("val")
if stage == "predict":
self.val_datasets = self.fetch_datasets("val")
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
def val_dataloader(self):
return [DataLoader(d, batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers) for d in self.val_datasets]
def test_dataloader(self):
return [DataLoader(d, batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers) for d in self.val_datasets]
def predict_dataloader(self):
return [DataLoader(d, batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers) for d in self.val_datasets]
# Path: params/datamodule.py
import albumentations as A
import wandb
from albumentations.pytorch import ToTensorV2
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from lib.data.cfg.local import LocalNaturalDatasetCfg
from lib.data.datamodules.vanilla import AUDataModule
project = "disfa"
aus = [
"AU1",
"AU2",
"AU4",
"AU5",
"AU6",
"AU9",
"AU12",
"AU15",
"AU17",
"AU20",
"AU26",
]
TRAIN_LABELED = [
| LocalNaturalDatasetCfg( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Psivant/femto
# Path: femto/md/constants.py
class OpenMMForceGroup(enum.IntEnum):
"""Standard force groups to assign to common OpenMM forces to make them easier to
identify."""
BOND = 0
ANGLE = 1
DIHEDRAL = 2
NONBONDED = 3
COM_RESTRAINT = 4
POSITION_RESTRAINT = 5
ALIGNMENT_RESTRAINT = 6
BAROSTAT = 7
ATM = 8
OTHER = 16
# Path: femto/md/constants.py
class OpenMMForceName(str, enum.Enum):
"""Standard names use for common OpenMM forces to make them easier to identify."""
COM_RESTRAINT = "com-restraint"
POSITION_RESTRAINT = "position-restraint"
ALIGNMENT_RESTRAINT = "alignment-restraint"
# Path: femto/fe/atm/_setup.py
import logging
import tempfile
import typing
import numpy
import openmm
import openmm.app
import openmm.unit
import parmed
import scipy.spatial.distance
import femto.fe.reference
import femto.md.rest
import femto.md.restraints
import femto.md.solvate
import femto.md.system
import femto.md.utils.openmm
import femto.fe.atm
import femto.fe.atm._utils
from femto.md.constants import OpenMMForceGroup, OpenMMForceName
_LOGGER = logging.getLogger(__name__)
def select_displacement(
receptor: parmed.amber.AmberParm,
ligand_1: parmed.amber.AmberParm,
ligand_2: parmed.amber.AmberParm | None,
distance: openmm.unit.Quantity,
) -> openmm.unit.Quantity:
"""Attempts to automatically select a displacement vector for the ligands.
Args:
receptor: The receptor.
ligand_1: The first ligand positioned in the binding site.
ligand_2: The second ligand positioned in the binding site.
distance: The distance to translate ligands along the displacement vector by.
Returns:
The displacement vector.
"""
ligand_coords = numpy.vstack(
[ligand_1.coordinates] + ([] if ligand_2 is None else [ligand_2.coordinates])
)
receptor_coords = receptor.coordinates
directions = numpy.array(
[
[-1.0, -1.0, -1.0],
[+1.0, -1.0, -1.0],
[+1.0, +1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0],
[+1.0, -1.0, +1.0],
[+1.0, +1.0, +1.0],
[-1.0, +1.0, +1.0],
]
)
directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)
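# the eight normalized cube-corner directions; the one that keeps the offset ligands farthest from the receptor is selected below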
closest_distances = []
for direction in directions:
displacement = direction * distance.value_in_unit(openmm.unit.angstrom)
offset_coords = ligand_coords + displacement
distances = scipy.spatial.distance.cdist(offset_coords, receptor_coords)
closest_distances.append(distances.min())
direction = directions[numpy.argmax(closest_distances)]
return direction.flatten() * distance
def _offset_ligand(
ligand: parmed.Structure, offset: openmm.unit.Quantity
) -> parmed.Structure:
"""Offsets the coordinates of the specified ligand by a specified amount.
Args:
ligand: The ligand to offset.
offset: The amount to offset the ligand by.
Returns:
The offset ligand.
"""
# we copy in this strange way because parmed doesn't
# copy all attrs correctly when using copy.deepcopy
with tempfile.TemporaryDirectory() as tmpdir:
ligand.save(f"{tmpdir}/ligand.parm7")
ligand.save(f"{tmpdir}/ligand.mol2")
ligand = parmed.amber.AmberParm(
f"{tmpdir}/ligand.parm7", f"{tmpdir}/ligand.mol2"
)
for atom in ligand.atoms:
atom.xx += offset[0].value_in_unit(openmm.unit.angstrom)
atom.xy += offset[1].value_in_unit(openmm.unit.angstrom)
atom.xz += offset[2].value_in_unit(openmm.unit.angstrom)
return ligand
def _apply_atm_restraints(
system: openmm.System,
config: "femto.fe.atm.ATMRestraints",
ligand_1_com_idxs: list[int],
ligand_1_ref_idxs: tuple[int, int, int] | None,
ligand_2_com_idxs: list[int] | None,
ligand_2_ref_idxs: tuple[int, int, int] | None,
receptor_ref_idxs: list[int],
offset: openmm.unit.Quantity,
):
"""Adds center of mass (COM) and optionally alignment restraints (if running RBFE)
to a system.
Args:
system: The system to add the constraints to in-place.
config: The restraint configuration.
ligand_1_com_idxs: The indices to use when computing the COM of the first
ligand.
ligand_1_ref_idxs: The indices of the first ligand to align on.
ligand_2_com_idxs: The indices to use when computing the COM of the second
ligand.
ligand_2_ref_idxs: The indices of the second ligand to align on.
receptor_ref_idxs: The indices of the receptor atoms that form the binding site.
offset: The vector that the ligand will be offset by during the ATM calculation.
"""
com_restraint = femto.fe.atm._utils.create_com_restraint(
ligand_1_com_idxs,
receptor_ref_idxs,
config.com.k,
config.com.radius,
[0.0, 0.0, 0.0] * openmm.unit.angstrom,
)
com_restraint.setForceGroup(OpenMMForceGroup.COM_RESTRAINT)
| com_restraint.setName(OpenMMForceName.COM_RESTRAINT) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AIFSH/NativeDancer
# Path: nativedancer/third_part/detectron2/data/catalog.py
class _DatasetCatalog(UserDict):
class Metadata(types.SimpleNamespace):
class _MetadataCatalog(UserDict):
def register(self, name, func):
def get(self, name):
def list(self) -> List[str]:
def remove(self, name):
def __str__(self):
def __getattr__(self, key):
def __setattr__(self, key, val):
def as_dict(self):
def set(self, **kwargs):
def get(self, key, default=None):
def get(self, name):
def list(self):
def remove(self, name):
def __str__(self):
_RENAMED = {
"class_names": "thing_classes",
"dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
"stuff_class_names": "stuff_classes",
}
# Path: nativedancer/third_part/detectron2/utils/comm.py
_LOCAL_PROCESS_GROUP = None
_MISSING_LOCAL_PG_ERROR = (
"Local process group is not yet created! Please use detectron2's `launch()` "
"to start processes and initialize pytorch process group. If you need to start "
"processes in other ways, please call comm.create_local_process_group("
"num_workers_per_machine) after calling torch.distributed.init_process_group()."
)
_LOCAL_PROCESS_GROUP = pg
def get_world_size() -> int:
def get_rank() -> int:
def create_local_process_group(num_workers_per_machine: int) -> None:
def get_local_process_group():
def get_local_rank() -> int:
def get_local_size() -> int:
def is_main_process() -> bool:
def synchronize():
def _get_global_gloo_group():
def all_gather(data, group=None):
def gather(data, dst=0, group=None):
def shared_random_seed():
def reduce_dict(input_dict, average=True):
# Path: nativedancer/third_part/detectron2/utils/file_io.py
class Detectron2Handler(PathHandler):
PREFIX = "detectron2://"
S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
def _get_supported_prefixes(self):
def _get_local_path(self, path, **kwargs):
def _open(self, path, mode="r", **kwargs):
# Path: nativedancer/third_part/detectron2/evaluation/evaluator.py
class DatasetEvaluator:
"""
Base class for a dataset evaluator.
The function :func:`inference_on_dataset` runs the model over
all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.
This class will accumulate information of the inputs/outputs (by :meth:`process`),
and produce evaluation results in the end (by :meth:`evaluate`).
"""
def reset(self):
"""
Preparation for a new round of evaluation.
Should be called before starting a round of evaluation.
"""
pass
def process(self, inputs, outputs):
"""
Process the pair of inputs and outputs.
If they contain batches, the pairs can be consumed one-by-one using `zip`:
.. code-block:: python
for input_, output in zip(inputs, outputs):
# do evaluation on single input/output pair
...
Args:
inputs (list): the inputs that's used to call the model.
outputs (list): the return value of `model(inputs)`
"""
pass
def evaluate(self):
"""
Evaluate/summarize the performance, after processing all input/output pairs.
Returns:
dict:
A new evaluator class can return a dict of arbitrary format
as long as the user can process the results.
In our train_net.py, we expect the following format:
* key: the name of the task (e.g., bbox)
* value: a dict of {metric name: score}, e.g.: {"AP50": 80}
"""
pass
# Path: nativedancer/third_part/detectron2/evaluation/cityscapes_evaluation.py
import glob
import logging
import numpy as np
import os
import tempfile
import torch
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
from collections import OrderedDict
from PIL import Image
from ..data import MetadataCatalog
from ..utils import comm
from ..utils.file_io import PathManager
from .evaluator import DatasetEvaluator
from cityscapesscripts.helpers.labels import name2label
from cityscapesscripts.helpers.labels import trainId2label
# Copyright (c) Facebook, Inc. and its affiliates.
class CityscapesEvaluator(DatasetEvaluator):
"""
Base class for evaluation using cityscapes API.
"""
def __init__(self, dataset_name):
"""
Args:
dataset_name (str): the name of the dataset.
It must have the following metadata associated with it:
"thing_classes", "gt_dir".
"""
self._metadata = MetadataCatalog.get(dataset_name)
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
def reset(self):
self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
self._temp_dir = self._working_dir.name
# All workers will write to the same results directory
# TODO this does not work in distributed training
assert (
| comm.get_local_size() == comm.get_world_size() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ethanweber/nerfiller
# Path: nerfiller/inpaint/saicinpainting/training/modules/depthwise_sep_conv.py
class DepthWiseSeperableConv(nn.Module):
def __init__(self, in_dim, out_dim, *args, **kwargs):
super().__init__()
if "groups" in kwargs:
# ignoring groups for Depthwise Sep Conv
del kwargs["groups"]
self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs)
self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
# Path: nerfiller/inpaint/saicinpainting/training/modules/multidilated_conv.py
class MultidilatedConv(nn.Module):
def __init__(
self,
in_dim,
out_dim,
kernel_size,
dilation_num=3,
comb_mode="sum",
equal_dim=True,
shared_weights=False,
padding=1,
min_dilation=1,
shuffle_in_channels=False,
use_depthwise=False,
**kwargs,
):
super().__init__()
convs = []
self.equal_dim = equal_dim
assert comb_mode in ("cat_out", "sum", "cat_in", "cat_both"), comb_mode
if comb_mode in ("cat_out", "cat_both"):
self.cat_out = True
if equal_dim:
assert out_dim % dilation_num == 0
out_dims = [out_dim // dilation_num] * dilation_num
self.index = sum(
[[i + j * (out_dims[0]) for j in range(dilation_num)] for i in range(out_dims[0])],
[],
)
else:
out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
out_dims.append(out_dim - sum(out_dims))
index = []
starts = [0] + out_dims[:-1]
lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)]
for i in range(out_dims[-1]):
for j in range(dilation_num):
index += list(range(starts[j], starts[j] + lengths[j]))
starts[j] += lengths[j]
self.index = index
assert len(index) == out_dim
self.out_dims = out_dims
else:
self.cat_out = False
self.out_dims = [out_dim] * dilation_num
if comb_mode in ("cat_in", "cat_both"):
if equal_dim:
assert in_dim % dilation_num == 0
in_dims = [in_dim // dilation_num] * dilation_num
else:
in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
in_dims.append(in_dim - sum(in_dims))
self.in_dims = in_dims
self.cat_in = True
else:
self.cat_in = False
self.in_dims = [in_dim] * dilation_num
conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d
dilation = min_dilation
for i in range(dilation_num):
if isinstance(padding, int):
cur_padding = padding * dilation
else:
cur_padding = padding[i]
convs.append(
conv_type(
self.in_dims[i],
self.out_dims[i],
kernel_size,
padding=cur_padding,
dilation=dilation,
**kwargs,
)
)
if i > 0 and shared_weights:
convs[-1].weight = convs[0].weight
convs[-1].bias = convs[0].bias
dilation *= 2
self.convs = nn.ModuleList(convs)
self.shuffle_in_channels = shuffle_in_channels
if self.shuffle_in_channels:
# shuffle list as shuffling of tensors is nondeterministic
in_channels_permute = list(range(in_dim))
random.shuffle(in_channels_permute)
# save as buffer so it is saved and loaded with checkpoint
self.register_buffer("in_channels_permute", torch.tensor(in_channels_permute))
def forward(self, x):
if self.shuffle_in_channels:
x = x[:, self.in_channels_permute]
outs = []
if self.cat_in:
if self.equal_dim:
x = x.chunk(len(self.convs), dim=1)
else:
new_x = []
start = 0
for dim in self.in_dims:
new_x.append(x[:, start : start + dim])
start += dim
x = new_x
for i, conv in enumerate(self.convs):
if self.cat_in:
input = x[i]
else:
input = x
outs.append(conv(input))
if self.cat_out:
out = torch.cat(outs, dim=1)[:, self.index]
else:
out = sum(outs)
return out
# Path: nerfiller/inpaint/saicinpainting/training/modules/base.py
import abc
import torch
import torch.nn as nn
from typing import Tuple, List
from nerfiller.inpaint.saicinpainting.training.modules.depthwise_sep_conv import (
DepthWiseSeperableConv,
)
from nerfiller.inpaint.saicinpainting.training.modules.multidilated_conv import (
MultidilatedConv,
)
class BaseDiscriminator(nn.Module):
@abc.abstractmethod
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Predict scores and get intermediate activations. Useful for feature matching loss
:return tuple (scores, list of intermediate activations)
"""
raise NotImplementedError()
def get_conv_block_ctor(kind="default"):
if not isinstance(kind, str):
return kind
if kind == "default":
return nn.Conv2d
if kind == "depthwise":
return DepthWiseSeperableConv
if kind == "multidilated":
| return MultidilatedConv |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nnanhuang/Customize-it-3D
# Path: ldm/modules/diffusionmodules/util.py
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
# select alphas for computing the variance schedule
alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
# according to the formula provided in https://arxiv.org/abs/2010.02502
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
if verbose:
print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
print(f'For the chosen value of eta, which is {eta}, '
f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
return sigmas, alphas, alphas_prev
# Path: ldm/modules/diffusionmodules/util.py
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
if ddim_discr_method == 'uniform':
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
elif ddim_discr_method == 'quad':
ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
else:
raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
# add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1
if verbose:
print(f'Selected timesteps for ddim sampler: {steps_out}')
return steps_out
# Path: ldm/modules/diffusionmodules/util.py
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise()
# Path: ldm/models/diffusion/sampling_util.py
def norm_thresholding(x0, value):
s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
return x0 * (value / s)
# Path: ldm/models/diffusion/plms.py
import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding
"""SAMPLING ONLY."""
class PLMSSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
if ddim_eta != 0:
raise ValueError('ddim_eta must be 0 for PLMS')
| self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TaoHuang13/diffusion_reward
# Path: diffusion_reward/models/codec_models/vqgan/codebook.py
class Codebook(nn.Module):
def __init__(self, args):
super(Codebook, self).__init__()
self.num_codebook_vectors = args.num_codebook_vectors
self.latent_dim = args.latent_dim
self.beta = args.beta
self.embedding = nn.Embedding(self.num_codebook_vectors, self.latent_dim)
self.embedding.weight.data.uniform_(-1.0 / self.num_codebook_vectors, 1.0 / self.num_codebook_vectors)
def forward(self, z):
z = z.permute(0, 2, 3, 1).contiguous()
z_flattened = z.view(-1, self.latent_dim)
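# squared Euclidean distance ||z - e||^2 between every latent vector and every codebook entry, expanded as ||z||^2 + ||e||^2 - 2 z.e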
d = torch.sum(z_flattened**2, dim=1, keepdim=True) + \
torch.sum(self.embedding.weight**2, dim=1) - \
2*(torch.matmul(z_flattened, self.embedding.weight.t()))
min_encoding_indices = torch.argmin(d, dim=1)
z_q = self.embedding(min_encoding_indices).view(z.shape)
loss = torch.mean((z_q.detach() - z)**2) + self.beta * torch.mean((z_q - z.detach())**2)
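# straight-through estimator: the forward pass uses the quantized z_q while gradients flow back to the encoder as if z_q were z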
z_q = z + (z_q - z).detach()
z_q = z_q.permute(0, 3, 1, 2)
return z_q, min_encoding_indices, loss
# Path: diffusion_reward/models/codec_models/vqgan/decoder.py
class Decoder(nn.Module):
def __init__(self, args):
super(Decoder, self).__init__()
#channels = [512, 256, 256, 128, 128]
channels = args.channels[::-1]
attn_resolutions = [16]
num_res_blocks = 2
resolution = args.latent_size
in_channels = channels[0]
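# start at the latent resolution and upsample by 2 after each stage until the output image resolution is reached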
layers = [nn.Conv2d(args.latent_dim, in_channels, 3, 1, 1),
ResidualBlock(in_channels, in_channels),
NonLocalBlock(in_channels),
ResidualBlock(in_channels, in_channels)]
for i in range(len(channels)):
out_channels = channels[i]
for j in range(num_res_blocks):
layers.append(ResidualBlock(in_channels, out_channels))
in_channels = out_channels
if resolution in attn_resolutions:
layers.append(NonLocalBlock(in_channels))
#if i != 0 and resolution < 64:
if resolution < args.resolution:
layers.append(UpSampleBlock(in_channels))
resolution *= 2
layers.append(GroupNorm(in_channels))
layers.append(Swish())
layers.append(nn.Conv2d(in_channels, args.image_channels, 3, 1, 1))
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
# Path: diffusion_reward/models/codec_models/vqgan/encoder.py
class Encoder(nn.Module):
def __init__(self, args):
super(Encoder, self).__init__()
#channels = [128, 128, 128, 256, 256, 512]
channels = args.channels
attn_resolutions = [16]
num_res_blocks = 2
resolution = args.resolution
latent_size = args.latent_size
layers = [nn.Conv2d(args.image_channels, channels[0], 3, 1, 1)]
for i in range(len(channels)-1):
in_channels = channels[i]
out_channels = channels[i + 1]
for j in range(num_res_blocks):
layers.append(ResidualBlock(in_channels, out_channels))
in_channels = out_channels
if resolution in attn_resolutions:
layers.append(NonLocalBlock(in_channels))
#if i != len(channels)-2 and resolution > latent_size:
if resolution > latent_size:
layers.append(DownSampleBlock(channels[i+1]))
resolution //= 2
layers.append(ResidualBlock(channels[-1], channels[-1]))
layers.append(NonLocalBlock(channels[-1]))
layers.append(ResidualBlock(channels[-1], channels[-1]))
layers.append(GroupNorm(channels[-1]))
layers.append(Swish())
layers.append(nn.Conv2d(channels[-1], args.latent_dim, 3, 1, 1))
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
# Path: diffusion_reward/models/codec_models/vqgan/vqgan.py
import torch
import torch.nn as nn
from .codebook import Codebook
from .decoder import Decoder
from .encoder import Encoder
class VQGAN(nn.Module):
def __init__(self, args):
super(VQGAN, self).__init__()
self.encoder = Encoder(args).to(device=args.device)
| self.decoder = Decoder(args).to(device=args.device) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: its0x4d/fastapi-jet
# Path: fastapi_jet/cli.py
def _version_callback(value: bool) -> None:
def _register_commands() -> None:
def main(
version: Optional[bool] = typer.Option(
None,
"--version",
"-v",
help="Show the application's version and exit.",
callback=_version_callback,
is_eager=True,
)
) -> None:
# Path: fastapi_jet/context.py
class ProjectContext(BaseModel):
name: str
folder_name: str = None
package_name: str = None
use_templates: bool = False
no_tests: bool = False
@root_validator(pre=True)
def set_folder_name(cls, values):
if not values.get("folder_name"):
values["folder_name"] = name_fixer(values["name"])
return values
@root_validator(pre=True)
def set_package_name(cls, values):
if not values.get("package_name"):
values["package_name"] = name_fixer(values["name"], extra=["-"])
return values
# Path: fastapi_jet/generator.py
def generate_template(template_name: str, context: Union[ProjectContext, AppContext]) -> str:
"""
This function is used to generate a template using the cookiecutter library.
:param template_name: The name of the template to generate.
:type template_name: str
:param context: An object that contains the context for the template.
This can be either a ProjectContext or an AppContext.
:type context: Union[ProjectContext, AppContext]
:return: The path to the generated template.
:rtype: str
"""
try:
# Generate the template using the cookiecutter library. in 'apps' folder
data = {
'template': os.path.join(TEMPLATES_DIR, template_name),
'no_input': True,
'extra_context': context.dict(),
}
if template_name == "app":
data['output_dir'] = os.path.join(os.getcwd(), 'apps')
cookiecutter(**data)
except OutputDirExistsException:
typer.echo(
f"[!] Unable to create FastAPI {template_name}! An app with the same name already exists!"
f"\n[+] Please choose a different name or delete the existing app and try again."
)
else:
text = (
f"[+] {template_name.capitalize()} [{context.folder_name}] created successfully!"
)
if template_name == "app":
text += f"\n[+] To get started, add your app to ROUTERS in app/main.py"
typer.echo(text)
return os.path.join(os.getcwd(), context.folder_name)
# Path: fastapi_jet/utils.py
def binary_question(question: str, default: bool = False) -> questionary.Question:
"""
This function is used to present a binary question (yes/no) to the user.
:param question: The question to ask the user.
:type question: str
:param default: The default answer to the question. If not provided, the default is False (no).
:type default: bool, optional
:return: A questionary.Question object, which can be used to interactively ask the user a question.
:rtype: questionary.Question
"""
return questionary.confirm(
question,
default=default,
)
# Path: fastapi_jet/utils.py
def name_fixer(name: str, extra: List[str] = None) -> str:
"""
This function is used to replace certain special characters in a string with an underscore.
:param name: The original string that needs to be fixed.
:type name: str
:param extra: An optional list of additional characters that should be replaced.
:type extra: List[str], optional
:return: The fixed string.
:rtype: str
"""
# Define the default list of characters to replace.
chars = "* /\\|<>?:\"' "
# If the 'extra' parameter is provided, add its characters to the list of characters to replace.
if extra:
chars += "".join(extra)
# Replace each character in the list with an underscore.
for char in chars:
name = name.replace(char, "_")
return name
# Path: fastapi_jet/commands/startproject.py
import os
import typer
from questionary.form import form
from fastapi_jet.cli import app
from fastapi_jet.context import ProjectContext
from fastapi_jet.generator import generate_template
from fastapi_jet.utils import binary_question, name_fixer
@app.command(name="startproject")
def startproject(
name: str = typer.Argument(
..., help="Name of the project",
callback=lambda name: name_fixer(name),
metavar="PROJECT_NAME"
),
interactive: bool = typer.Option(False, "--interactive", "-i", help="Interactive mode"),
use_templates: bool = typer.Option(False, "--use-templates", "-t", help="Use templates"),
):
"""
Start a new project
"""
if interactive:
project = form(
| use_templates=binary_question("Do you want to use templates?", default=True), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: WithSecureLabs/damn-vulnerable-llm-agent
# Path: tools.py
def get_current_user(input : str):
def get_transactions(userId : str):
# Path: utils.py
def display_instructions():
# Markdown with some basic CSS styles for the box
box_css = """
<style>
.instructions-box {
background-color: #f0f0f0;
border: 1px solid #ddd;
border-radius: 5px;
padding: 20px;
}
</style>
"""
st.sidebar.markdown(box_css, unsafe_allow_html=True)
st.sidebar.markdown(
"""
<div class="instructions-box">
### Instructions
You can exploit this ReAct-based assistant via prompt
injection to get two flags:
- You'll obtain the first flag by accessing the transactions for user with ID 2
- The second flag is DocBrown's password
To help you finish the challenge, we suggest you familiarize yourself with the techniques
described <a href="https://labs.withsecure.com/publications/llm-agent-prompt-injection" target="_blank">here</a>
and <a href="https://youtu.be/43qfHaKh0Xk" target="_blank">here</a>.
</div>
You'll also find the database schema to be useful:
""",
unsafe_allow_html=True,
)
if st.sidebar.button('Show database schema', use_container_width=True):
st.sidebar.info('Users(userId,username,password)\n\nTransactions(transactionId,username,reference,recipient,amount)')
# Path: utils.py
def display_logo():
# Convert your image
image_base64 = get_image_base64("labs-logo.png")
# URL of the company website
url = 'https://labs.withsecure.com/'
# HTML for centered image with hyperlink
html_string = f"""
<div style="display:flex; justify-content:center;">
<a href="{url}" target="_blank">
<img src="data:image/png;base64,{image_base64}" width="150px">
</a>
</div>
"""
# Display the HTML in the sidebar
st.sidebar.markdown(html_string, unsafe_allow_html=True)
# Path: main.py
import langchain
import streamlit as st
from dotenv import load_dotenv
from langchain.agents import ConversationalChatAgent, AgentExecutor
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.agents import initialize_agent
from langchain.callbacks import get_openai_callback
from tools import get_current_user_tool, get_recent_transactions_tool
from utils import display_instructions, display_logo
load_dotenv()
# Initialise tools
tools = [get_current_user_tool, get_recent_transactions_tool]
system_msg = """Assistant helps the current user retrieve the list of their recent bank transactions ans shows them as a table. Assistant will ONLY operate on the userId returned by the GetCurrentUser() tool, and REFUSE to operate on any other userId provided by the user."""
welcome_message = """Hi! I'm an helpful assistant and I can help fetch information about your recent transactions.\n\nTry asking me: "What are my recent transactions?"
"""
st.set_page_config(page_title="Damn Vulnerable LLM Agent")
st.title("Damn Vulnerable LLM Agent")
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
)
if len(msgs.messages) == 0:
msgs.clear()
msgs.add_ai_message(welcome_message)
st.session_state.steps = {}
avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
with st.chat_message(avatars[msg.type]):
# Render intermediate steps if any were saved
for step in st.session_state.steps.get(str(idx), []):
if step[0].tool == "_Exception":
continue
with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"):
st.write(step[0].log)
st.write(step[1])
st.write(msg.content)
if prompt := st.chat_input(placeholder="Show my recent transactions"):
st.chat_message("user").write(prompt)
llm = ChatOpenAI(
model_name="gpt-4-1106-preview",
temperature=0, streaming=True
)
tools = tools
chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools, verbose=True, system_message=system_msg)
executor = AgentExecutor.from_agent_and_tools(
agent=chat_agent,
tools=tools,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
verbose=True,
max_iterations=6
)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = executor(prompt, callbacks=[st_cb])
st.write(response["output"])
st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]
display_instructions()
| display_logo() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: MarcoGorelli/polars-upgrade
# Path: polars_upgrade/_ast_helpers.py
def ast_to_offset(node: ast.expr | ast.stmt) -> Offset:
return Offset(node.lineno, node.col_offset)
# Path: polars_upgrade/_data.py
def register(tp: type[AST_T]) -> Callable[[ASTFunc[AST_T]], ASTFunc[AST_T]]:
def register_decorator(func: ASTFunc[AST_T]) -> ASTFunc[AST_T]:
FUNCS[tp].append(func)
return func
return register_decorator
# Path: polars_upgrade/_data.py
class State(NamedTuple):
settings: Settings
aliases: set[str] = set()
in_annotation: bool = False
# Path: polars_upgrade/_data.py
class Settings(NamedTuple):
class State(NamedTuple):
class ASTCallbackMapping(Protocol):
AST_T = TypeVar('AST_T', bound=ast.AST)
RECORD_FROM_IMPORTS = frozenset((
'polars',
))
FUNCS = collections.defaultdict(list)
def register(tp: type[AST_T]) -> Callable[[ASTFunc[AST_T]], ASTFunc[AST_T]]:
def register_decorator(func: ASTFunc[AST_T]) -> ASTFunc[AST_T]:
def __getitem__(self, tp: type[AST_T]) -> list[ASTFunc[AST_T]]: ...
def visit(
funcs: ASTCallbackMapping,
tree: ast.Module,
settings: Settings,
) -> dict[Offset, list[TokenFunc]]:
def _import_plugins() -> None:
# Path: polars_upgrade/_token_helpers.py
def find_op(tokens: list[Token], i: int, src: str) -> int:
return _find_token(tokens, i, 'OP', src)
# Path: polars_upgrade/_token_helpers.py
def is_simple_expression(node: ast.expr, aliases: set[str]) -> bool:
while True:
if isinstance(node, ast.Call):
node = node.func
elif (
isinstance(node, ast.Attribute) and
node.attr.islower() and
isinstance(node.value, ast.Name) and
node.value.id in aliases
):
return True
elif isinstance(node, ast.Attribute):
node = node.value
else:
return False
# Path: polars_upgrade/_plugins/map_dict.py
import ast
import functools
from typing import Iterable
from tokenize_rt import NON_CODING_TOKENS
from tokenize_rt import Offset
from tokenize_rt import Token
from polars_upgrade._ast_helpers import ast_to_offset
from polars_upgrade._data import register
from polars_upgrade._data import State
from polars_upgrade._data import TokenFunc
from polars_upgrade._token_helpers import find_op
from polars_upgrade._token_helpers import is_simple_expression
from __future__ import annotations
def rename(
i: int,
tokens: list[Token],
*,
name: str,
new: str,
) -> None:
while not (tokens[i].name == 'NAME' and tokens[i].src == name):
i += 1
tokens[i] = tokens[i]._replace(src=new)
def rename_and_add_default(
i: int,
tokens: list[Token],
*,
name: str,
new: str,
) -> None:
while not (tokens[i].name == 'NAME' and tokens[i].src == name):
i += 1
tokens[i] = tokens[i]._replace(src=new)
start_paren = find_op(tokens, i, '(')
close_paren = find_op(tokens, start_paren, ')')
# is there a comma before the close paren?
i = close_paren - 1
while tokens[i].name in NON_CODING_TOKENS:
i -= 1
if ',' not in tokens[i].src:
tokens.insert(i + 1, Token('OP', ', '))
tokens.insert(i + 2, Token('NAME', 'default'))
tokens.insert(i + 3, Token('OP', '='))
tokens.insert(i + 4, Token('NUMBER', 'None'))
else:
tokens.insert(i + 1, Token('NAME', 'default'))
tokens.insert(i + 2, Token('OP', '='))
tokens.insert(i + 3, Token('NUMBER', 'None'))
@register(ast.Call)
def visit_Call(
state: State,
node: ast.Call,
parent: ast.AST,
) -> Iterable[tuple[Offset, TokenFunc]]:
if (
isinstance(node.func, ast.Attribute) and
| is_simple_expression(node.func.value, state.aliases) and |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: I-am-PUID-0/pd_zurg
# Path: rclone_rd/rclone.py
def get_port_from_config(config_file_path, key_type):
def setup():
RCLONEMN_RD = f"{RCLONEMN}_RD"
RCLONEMN_AD = f"{RCLONEMN}_AD"
RCLONEMN_RD = RCLONEMN_AD = RCLONEMN
# Path: cleanup/duplicate_cleanup.py
def delete_media_with_retry(media):
def process_tv_shows():
def process_movies():
def setup():
def cleanup_interval():
def cleanup_schedule():
def start_cleanup():
def cleanup_thread():
# Path: update/auto_update.py
def auto_update(self, process_name, enable_update):
if enable_update:
self.logger.info(f"Automatic updates set to {format_time(self.auto_update_interval())} for {process_name}")
self.schedule_thread = threading.Thread(target=self.update_schedule)
self.schedule_thread.start()
self.start_process(process_name)
else:
self.logger.info(f"Automatic update disabled for {process_name}")
self.start_process(process_name)
# Path: main.py
from base import *
from rclone_rd import rclone
from cleanup import duplicate_cleanup
from update import auto_update
import plex_debrid_ as p
import zurg as z
def main():
logger = get_logger()
version = '2.0.1'
ascii_art = f'''
_______ ______ _______ _______ _______
( ____ )( __ \ / ___ )|\ /|( ____ )( ____ \\
| ( )|| ( \ ) \/ ) || ) ( || ( )|| ( \/
| (____)|| | ) | / )| | | || (____)|| |
| _____)| | | | / / | | | || __)| | ____
| ( | | ) | / / | | | || (\ ( | | \_ )
| ) | (__/ ) / (_/\| (___) || ) \ \__| (___) |
|/ (______/_____(_______/(_______)|/ \__/(_______)
(_____)
Version: {version}
'''
logger.info(ascii_art.format(version=version) + "\n" + "\n")
def healthcheck():
while True:
time.sleep(10)
try:
result = subprocess.run(['python', 'healthcheck.py'], capture_output=True, text=True)
if result.stderr:
logger.error(result.stderr.strip())
except Exception as e:
logger.error('Error running healthcheck.py: %s', e)
time.sleep(50)
thread = threading.Thread(target=healthcheck)
thread.daemon = True
thread.start()
try:
if ZURG is None or str(ZURG).lower() == 'false':
pass
elif str(ZURG).lower() == 'true':
try:
if RDAPIKEY or ADAPIKEY:
try:
z.setup.zurg_setup()
z_updater = z.update.ZurgUpdate()
if ZURGUPDATE:
| z_updater.auto_update('Zurg',True) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JeffersonQin/DungeonAssistant
# Path: utils/o3dobj.py
def get_o3d_unit_block_at_origin():
def get_o3d_trajectory_object(points, color=(1, 0, 0)):
def transform_o3d_format(points):
# Path: utils/io.py
def load_point_clouds(
pointcloud_base, pointcloud_prefix, merge_cnt, overlap_discard_num, voxel_size=0.0
):
def load_coordinates_and_timestamps(json_file):
def load_transformation_matrices(transformation_dir: str):
def save_coodinates_and_timestamps(json_file, points, timestamps):
# Path: utils/tfm.py
def transform_trajectory(points, transformation):
def transform_clouds_and_trajectories(clouds, trajectories, matrices):
def retrieve_floor_plan(cloud, scale=100):
# Path: registration.py
import json
import argparse
import os
import os.path as osp
import time
import open3d as o3d
import numpy as np
import copy
import matplotlib.pyplot as plt
from utils import o3dobj
from utils import io
from utils import tfm
default=0.05,
help="voxel size for global fast registration downsampling. default is 0.05",
)
parser.add_argument(
"--voxel_size_icp",
type=float,
default=0.05,
help="voxel size for icp downsampling. default is 0.05",
)
parser.add_argument("--skip_icp", action="store_true", help="skip icp and only run fgr")
parser.add_argument(
"--transformed_trajectory_out",
type=str,
default="trajectory_1.jsonl",
help="output trajectory of the transformed trajectory 1 (to trajectory 2)",
)
args = parser.parse_args()
pointcloud_file_path_1 = args.pointcloud1
pointcloud_file_path_2 = args.pointcloud2
trajectory_file_path_1 = args.trajectory1
trajectory_file_path_2 = args.trajectory2
def preprocess_point_cloud(pcd, voxel_size):
"""Downsamples the point cloud and computes the normals and FPFH features"""
print(f":: Downsample with a voxel size {voxel_size:.3f}.")
pcd_down = pcd.voxel_down_sample(voxel_size)
radius_normal = voxel_size * 2
print(f":: Estimate normal with search radius {radius_normal:.3f}.")
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)
)
radius_feature = voxel_size * 5
print(f":: Compute FPFH feature with search radius {radius_feature:.3f}.")
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100),
)
return pcd_down, pcd_fpfh
def prepare_dataset(voxel_size):
"""Loads two point clouds and downsamples them."""
print(":: Load two point clouds")
source = o3d.io.read_point_cloud(pointcloud_file_path_1)
target = o3d.io.read_point_cloud(pointcloud_file_path_2)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_fast_global_registration(
source_down, target_down, source_fpfh, target_fpfh, voxel_size
):
"""Performs fast global registration on the downsampled point clouds"""
distance_threshold = voxel_size * 0.5
print(
f":: Apply fast global registration with distance threshold {distance_threshold:.3f}"
)
result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(
source_down,
target_down,
source_fpfh,
target_fpfh,
o3d.pipelines.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold
),
)
return result
def execute_vanilla_icp(source, target):
"""Performs vanilla ICP on the point clouds"""
estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()
max_correspondence_distance = 0.5
# Convergence-Criteria for Vanilla ICP
criteria = o3d.pipelines.registration.ICPConvergenceCriteria(
relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50
)
result = o3d.pipelines.registration.registration_icp(
source,
target,
max_correspondence_distance,
estimation_method=estimation,
criteria=criteria,
)
return result
if __name__ == "__main__":
voxel_size_fgr = args.voxel_size_fgr
voxel_size_icp = args.voxel_size_icp
(
cloud_1,
cloud_2,
cloud_1_down,
cloud_2_down,
cloud_1_fpfh,
cloud_2_fpfh,
) = prepare_dataset(voxel_size=voxel_size_fgr)
color_1 = [0.9450980392, 0.5764705882, 0.7098039216]
color_2 = [0.11, 0.72, 0.89]
cloud_1.paint_uniform_color(color_1)
cloud_2.paint_uniform_color(color_2)
cloud_1_down.paint_uniform_color(color_1)
cloud_2_down.paint_uniform_color(color_2)
# axis
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
# unit block
| unit_block = o3dobj.get_o3d_unit_block_at_origin() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: KAIST-VICLab/From_Ground_To_Objects
# Path: networks/layers.py
class ConvBlock(nn.Module):
"""Layer to perform a convolution followed by ELU
"""
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, x):
out = self.conv(x)
out = self.nonlin(out)
return out
# Path: networks/layers.py
class Conv3x3(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
# Path: networks/layers.py
def upsample(x):
"""Upsample input tensor by a factor of 2
"""
return F.interpolate(x, scale_factor=2, mode="nearest")
# Path: networks/layers.py
def disp_to_depth(disp, min_depth, max_depth):
"""Convert network's sigmoid output into depth prediction
The formula for this conversion is given in the 'additional considerations'
section of the paper.
"""
min_disp = 1 / max_depth
max_disp = 1 / min_depth
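# map the sigmoid output linearly onto the disparity range [1/max_depth, 1/min_depth], then invert to obtain depth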
scaled_disp = min_disp + (max_disp - min_disp) * disp
depth = 1 / scaled_disp
return scaled_disp, depth
# Path: networks/layers.py
def coords_to_normals(coords):
"""Calculate surface normals using first order finite-differences.
https://github.com/voyleg/perceptual-depth-sr/
Parameters
----------
coords : array_like
Coordinates of the points (**, 3, h, w).
Returns
-------
normals : torch.Tensor
Surface normals (**, 3, h, w).
"""
coords = torch.as_tensor(coords)
if coords.ndim < 4:
coords = coords[None]
dxdu = coords[..., 0, :, 1:] - coords[..., 0, :, :-1]
dydu = coords[..., 1, :, 1:] - coords[..., 1, :, :-1]
dzdu = coords[..., 2, :, 1:] - coords[..., 2, :, :-1]
dxdv = coords[..., 0, 1:, :] - coords[..., 0, :-1, :]
dydv = coords[..., 1, 1:, :] - coords[..., 1, :-1, :]
dzdv = coords[..., 2, 1:, :] - coords[..., 2, :-1, :]
dxdu = torch.nn.functional.pad(dxdu, (0, 1), mode='replicate')
dydu = torch.nn.functional.pad(dydu, (0, 1), mode='replicate')
dzdu = torch.nn.functional.pad(dzdu, (0, 1), mode='replicate')
# pytorch cannot just do `dxdv = torch.nn.functional.pad(dxdv, (0, 0, 0, 1), mode='replicate')`, so
dxdv = torch.cat([dxdv, dxdv[..., -1:, :]], dim=-2)
dydv = torch.cat([dydv, dydv[..., -1:, :]], dim=-2)
dzdv = torch.cat([dzdv, dzdv[..., -1:, :]], dim=-2)
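# the surface normal is the cross product of the finite-difference tangent vectors along u and v, normalized to unit length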
n_x = dydv * dzdu - dydu * dzdv
n_y = dzdv * dxdu - dzdu * dxdv
n_z = dxdv * dydu - dxdu * dydv
n = torch.stack([n_x, n_y, n_z], dim=-3)
n = torch.nn.functional.normalize(n, dim=-3)
return n
# Path: networks/depth_decoder.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from .layers import ConvBlock, Conv3x3, upsample, disp_to_depth, coords_to_normals
from timm.models.layers import trunc_normal_
from .cadepth import SPM, DEM
# Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
class DepthDecoder(nn.Module):
def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):
super(DepthDecoder, self).__init__()
self.num_output_channels = num_output_channels
self.use_skips = use_skips
self.upsample_mode = 'nearest'
self.scales = scales
self.opt = opt
self.num_ch_enc = num_ch_enc
self.num_ch_dec = np.array([16, 32, 64, 128, 256])
self.backproject_depth = backproject_depth
self.min_depth = min_depth
self.max_depth = max_depth
# decoder
self.convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
if self.opt["use_surface_normal"] and i != 4:
num_ch_in += 3
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
# upconv_1
num_ch_in = self.num_ch_dec[i]
if self.use_skips and i > 0:
num_ch_in += self.num_ch_enc[i - 1]
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
if self.opt['cadepth']:
self.convs[("dem", i)] = DEM(num_ch_in)
for s in self.scales:
| self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: marc-rigter/polygrad-world-models
# Path: polygrad/utils/training.py
class EMA():
'''
exponential moving average
'''
def __init__(self, beta):
super().__init__()
self.beta = beta
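# beta is the decay factor: each update keeps beta of the running average and mixes in (1 - beta) of the new value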
def update_model_average(self, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.update_average(old_weight, up_weight)
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
# Path: polygrad/utils/evaluation.py
def get_standardized_stats(policy_distr, act):
# Compute logprob with all action distributions normalized to standard normal.
policy_mean = policy_distr.mean
policy_std = policy_distr.stddev
standard_normal = D.independent.Independent(D.normal.Normal(torch.zeros_like(policy_mean), torch.ones_like(policy_mean)), 1)
normed_act = (act - policy_mean) / policy_std
standard_logprob = standard_normal.log_prob(normed_act)
act_stds = torch.std(normed_act, dim=[0, 1])
act_means = torch.mean(normed_act, dim=[0, 1])
return standard_logprob, act_stds, act_means
# Path: polygrad/agent/a2c.py
import torch
import copy
import torch.nn as nn
import copy
import torch.nn.functional as F
import torch.distributions as D
import importlib
import wandb
from torch import Tensor
from polygrad.utils.training import EMA
from .functions import *
from .common import *
from polygrad.utils.evaluation import get_standardized_stats
class ActorCritic(nn.Module):
def __init__(self,
in_dim,
out_actions,
normalizer,
device="cuda:0",
hidden_dim=256,
min_std=0.01,
fixed_std=False,
decay_std_steps=500000,
init_std=0.5,
hidden_layers=2,
layer_norm=True,
gamma=0.999,
ema=0.995,
lambda_gae=0.8,
entropy_weight=1e-3,
entropy_target=-1,
tune_entropy=True,
target_interval=100,
lr_actor=1e-4,
lr_critic=3e-4,
lr_alpha=1e-2,
actor_grad='reinforce',
actor_dist='normal_tanh',
normalize_adv=False,
grad_clip=None,
clip_logprob=True,
min_logprob=-10.0,
learned_std=False,
ac_use_normed_inputs=True,
target_update=0.02,
tune_actor_lr=3e-4,
lr_schedule='constant',
lr_decay_steps=1000000,
log_interval=20000,
linesearch=False,
linesearch_tolerance=0.25,
linesearch_ratio=0.8,
**kwargs
):
super().__init__()
self.in_dim = in_dim
self.action_dim = out_actions
self.gamma = gamma
self.lambda_ = lambda_gae
self.target_interval = target_interval
self.actor_grad = actor_grad
self.actor_dist = actor_dist
self.min_std = min_std
self.clip_logprob = clip_logprob
self.normalizer = normalizer
self.min_logprob = min_logprob * self.action_dim
self.learned_std = learned_std
self.fixed_std = fixed_std
self.decay_std_steps = decay_std_steps
self.init_std = init_std
self.current_std = init_std
self.use_normed_inputs = ac_use_normed_inputs
self.lr_decay_steps = lr_decay_steps
self.log_interval = log_interval
self.last_log = -float('inf')
self.linesearch = linesearch
self.linesearch_tolerance = linesearch_tolerance
self.linesearch_ratio = linesearch_ratio
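# when the std is neither fixed nor a separately learned parameter, the actor predicts both a mean and a std per action dimension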
if not self.fixed_std and not self.learned_std:
actor_out_dim = 2 * out_actions
else:
actor_out_dim = out_actions
self.actor = MLP(in_dim, actor_out_dim, hidden_dim, hidden_layers, layer_norm).to(device)
self.critic = MLP(in_dim, 1, hidden_dim, hidden_layers, layer_norm).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_target.requires_grad_(False)
| self.ema = EMA(ema) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Chat-3D/Chat-3D-v2
# Path: utils/distributed.py
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
# Path: utils/distributed.py
def is_main_process():
return get_rank() == 0
# Path: utils/logger.py
import functools
import logging
import os
import sys
import time
import wandb
import torch
from typing import Any, Dict, Union
from .distributed import get_rank, is_main_process
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
# from MMF: https://github.com/facebookresearch/mmf/blob/master/mmf/utils/logger.py
# Copyright (c) Facebook, Inc. and its affiliates.
def log_dict_to_wandb(log_dict, step, prefix=""):
"""include a separator `/` at the end of `prefix`"""
if not is_main_process():
return
log_dict = {f"{prefix}{k}": v for k, v in log_dict.items()}
wandb.log(log_dict, step)
def setup_wandb(config):
if not (config.wandb.enable and is_main_process()):
return
run = wandb.init(
config=config,
project=config.wandb.project,
entity=config.wandb.entity,
name=os.path.basename(config.output_dir),
reinit=True
)
return run
def setup_output_folder(save_dir: str, folder_only: bool = False):
"""Sets up and returns the output file where the logs will be placed
based on the configuration passed. Usually "save_dir/logs/log_<timestamp>.txt".
If env.log_dir is passed, logs will be directly saved in this folder.
Args:
folder_only (bool, optional): If folder should be returned and not the file.
Defaults to False.
Returns:
str: folder or file path depending on folder_only flag
"""
log_filename = "train_"
log_filename += time.strftime("%Y_%m_%dT%H_%M_%S")
log_filename += ".log"
log_folder = os.path.join(save_dir, "logs")
if not os.path.exists(log_folder):
os.makedirs(log_folder)
if folder_only:
return log_folder
log_filename = os.path.join(log_folder, log_filename)
return log_filename
def setup_logger(
output: str = None,
color: bool = True,
name: str = "mmf",
disable: bool = False,
clear_handlers=True,
*args,
**kwargs,
):
"""
Initialize the MMF logger and set its verbosity level to "INFO".
Outside libraries shouldn't call this in case they have set their
own logging handlers and setup. If they do, and don't want to
clear handlers, pass clear_handlers options.
The initial version of this function was taken from D2 and adapted
for MMF.
Args:
output (str): a file name or a directory to save log.
If ends with ".txt" or ".log", assumed to be a file name.
Default: Saved to file <save_dir/logs/log_[timestamp].txt>
color (bool): If false, won't log colored logs. Default: true
name (str): the root module name of this logger. Defaults to "mmf".
disable: do not use
clear_handlers (bool): If false, won't clear existing handlers.
Returns:
logging.Logger: a logger
"""
if disable:
return None
logger = logging.getLogger(name)
logger.propagate = False
logging.captureWarnings(True)
warnings_logger = logging.getLogger("py.warnings")
plain_formatter = logging.Formatter(
"%(asctime)s | %(levelname)s | %(name)s : %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
| distributed_rank = get_rank() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SqueezeBits/owlite
# Path: owlite/logger.py
class Logger(logging.Logger):
class _WarningFilterContext:
class WarningFilter(logging.Filter):
ENV_VAR = "OWLITE_LOG_LEVEL"
DEBUG_WARNING = 15
ULTRA_VERBOSE = -10
def ignore_warnings(self):
def __init__(self, logger) -> None:
def __enter__(self):
def filter(self, record):
def __exit__(self, exc_type, exc_val, exc_tb):
def debug_warning(self, msg, *args, **kwargs):
def level(self) -> int:
def level(self, value):
def suppress_owlite_warnings(cls):
def new_init(self, *args, **kwargs):
# Path: owlite/calib/_histogram_calibrator.py
class _HistogramCalibrator(_Calibrator):
"""Histogram calibrator.
Attributes:
set_attr_list (Dict[str, torch.Tensor]): Initialized properties to register with the quantizer.
'histogram': histogram count. Default [0, ..., 0], len = 2048.
'bin_edges': histogram edges. Default [0, ..., 0], len = 2048.
'histc_bins': integer. number of histogram bins. Default 2048.
"""
def __init__(self, quantizer):
"""Initializes for histogram calibrator"""
super().__init__(quantizer)
self.set_attr_list = {}
def update(self):
raise NotImplementedError
def prepare(self):
# define forward hook function
def histogram_forward_hook_func(module, inputs, output):
"""Forward hook function to get histogram value"""
_input = inputs[0].clone()
if module.is_enabled:
raise RuntimeError(
"The quantizer should be disabled during calibration."
)
if (
module.symmetric.item()
and module.unsigned.item()
and inputs[0].min() < 0
):
log.warning(
"The unsigned fake quantizer has a negative number as input. "
"It will automatically convert to a signed fake quantizer.",
stacklevel=2,
)
module.invert_signedness()
with torch.no_grad():
new_input = []
if module.per_channel:
_channel_axis = 0
_channel_size = _input.shape[_channel_axis]
for chn in range(_channel_size):
_input_chn = torch.select(_input, _channel_axis, chn)
new_input.append(_input_chn)
else:
new_input.append(_input)
# _histc_cuda does not have a deterministic implementation
_deterministic_enable = torch.are_deterministic_algorithms_enabled()
if _deterministic_enable:
torch.use_deterministic_algorithms(False)
for i, val in enumerate(new_input):
local_max = val.abs().max().clone().to(module.bin_edges[i].device)
if (
module.histogram[i].data.sum() == 0
and module.bin_edges[i].data.sum() == 0
):
module.histogram[i].data = torch.histc(
val.abs(),
bins=int(module.histc_bins[i].data),
min=0,
max=local_max,
).to(module.histogram[i].device)
module.bin_edges[i].data = torch.linspace(
0, local_max, int(module.histc_bins[i].data) + 1
).to(module.bin_edges[i].device)
else:
if module.per_channel:
break
if local_max > module.bin_edges[i].data[-1]:
interval = (
module.bin_edges[i].data[1]
- module.bin_edges[i].data[0]
)
module.histc_bins[i].data = torch.Tensor(
[int((local_max / interval).ceil().item())]
)
module.bin_edges[i].data = torch.arange(
0,
local_max + interval,
interval,
device=module.bin_edges[i].device,
)
local_hist = torch.histc(
val.abs(),
bins=int(module.histc_bins[i].data),
min=0,
max=module.bin_edges[i].data[-1],
).to(module.bin_edges[i].device)
local_hist[
: module.histogram[i].data.numel()
] += module.histogram[i].data
module.histogram[i].data = local_hist
# allocate to original state
if _deterministic_enable:
torch.use_deterministic_algorithms(True)
return output
# ~define forward hook function
# set histogram, bin_edges attr and register forward hook
_histogram_size = 2048
if self.quantizer.per_channel:
_channel_size = self.quantizer.step_size.shape[0]
else:
_channel_size = 1
device = self.quantizer.step_size.device
self.set_attr_list = {
"histogram": [
torch.zeros(_histogram_size).to(device) for _ch in range(_channel_size)
],
"bin_edges": [
torch.zeros(_histogram_size + 1).to(device)
for _ch in range(_channel_size)
],
"histc_bins": [
torch.Tensor([_histogram_size]).to(device)
for _ch in range(_channel_size)
],
}
for attr, default in self.set_attr_list.items():
if hasattr(self.quantizer, attr):
raise AttributeError(f"In Quantizer, {attr} attribution already exists")
setattr(self.quantizer, attr, default)
self.hook_handler = self.quantizer.register_forward_hook(
histogram_forward_hook_func
)
# Path: owlite/calib/mse_calibrator.py
import torch
from ..logger import log
from ._histogram_calibrator import _HistogramCalibrator
"""MSE(Mean Squared Error) calibrator"""
class MSECalibrator(_HistogramCalibrator):
"""MSE Calibrator Class"""
def update(self):
# update step_size using "mse"
if self.quantizer.histogram is None or self.quantizer.bin_edges is None:
| log.error(f"quantizer.histogram : {self.quantizer.histogram}") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ximinng/PyTorch-SVGRender
# Path: pytorch_svgrender/svgtools/shape.py
def circle_tag(cx: float, cy: float, r: float, transform: str = None):
attrib = {
'cx': f'{cx}', 'cy': f'{cy}', 'r': f'{r}'
}
if transform is not None:
attrib['transform'] = transform
_circle = ET.Element('circle', attrib) # tag, attrib
return _circle
# Path: pytorch_svgrender/svgtools/shape.py
def rect_tag(
x: float, y: float, rx: float, ry: float,
width: float = 600, height: float = 600,
transform: str = None
):
attrib = {
'x': f'{x}', 'y': f'{y}', 'rx': f'{rx}', 'ry': f'{ry}',
'width': f'{width}', 'height': f'{height}'
}
if transform is not None:
attrib['transform'] = transform
_rect = ET.Element('rect', attrib) # tag, attrib
return _rect
# Path: pytorch_svgrender/svgtools/type.py
def is_valid_svg(file_path: AnyStr) -> bool:
try:
tree = ET.parse(file_path)
root = tree.getroot()
if root.tag.endswith('svg') and 'xmlns' in root.attrib:
return True
else:
return False
except ET.ParseError:
return False
# Path: pytorch_svgrender/svgtools/process.py
import xml.etree.ElementTree as ET
import omegaconf
from typing import Tuple
from .shape import circle_tag, rect_tag
from .type import is_valid_svg
# -*- coding: utf-8 -*-
# Author: ximing
# Description: process
# Copyright (c) 2023, XiMing Xing.
# License: MIT License
def delete_empty_path(input_svg: str, output_svg: str):
is_valid_svg(input_svg)
# read svg
tree = ET.parse(input_svg)
root = tree.getroot()
group = ET.Element('g')
for i, element in enumerate(root.iter()):
element.tag = element.tag.split('}')[-1]
if element.tag == 'path':
if element.get('d') == 'C NaN NaN' or element.get('d') == '':
continue
group.append(element)
# new svg
svg = ET.Element('svg',
xmlns="http://www.w3.org/2000/svg",
version='1.1',
width=root.get('width'),
height=root.get('height'),
viewBox=root.get('viewBox'))
svg.append(group)
tree = ET.ElementTree(svg)
tree.write(output_svg, encoding='utf-8', xml_declaration=True)
def add_clipPath2def(mounted_node: ET.Element, tag_name: str, attrs: omegaconf.DictConfig):
# add defs node
defs = ET.SubElement(mounted_node, 'defs') # parent=mounted_node, tag='defs'
if tag_name == 'none':
return None
# add clipPath node
id = 'def_clip'
_circleClip = ET.SubElement(defs, 'clipPath', id='def_clip') # parent=defs, tag='clipPath'
# add ops
if tag_name == 'circle_clip':
_circleClip.append(
| circle_tag(cx=attrs.cx, cy=attrs.cy, r=attrs.r) |
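A small self-contained sketch of the same ElementTree pattern, building an svg root with a defs/clipPath/circle subtree; the attribute values are arbitrary examples.

import xml.etree.ElementTree as ET

svg = ET.Element('svg', xmlns="http://www.w3.org/2000/svg", width='600', height='600')
defs = ET.SubElement(svg, 'defs')
clip = ET.SubElement(defs, 'clipPath', id='def_clip')
clip.append(ET.Element('circle', {'cx': '300', 'cy': '300', 'r': '250'}))
# prints an <svg> element containing <defs><clipPath id="def_clip"><circle .../></clipPath></defs>
print(ET.tostring(svg, encoding='unicode'))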
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lyhisme/DeST
# Path: libs/models/graph/graph.py
class Graph:
def __init__(self, labeling_mode='spatial', layout='MCFS-22'):
self.get_edge(layout)
self.A = self.get_adjacency_matrix(labeling_mode)
def get_edge(self, layout):
if layout == 'MCFS-22' or layout == 'MCFS-130':
self.num_node = 25
self.self_link = [(i, i) for i in range(self.num_node)]
inward_ori_index = [(2,9), (1,2), (16,1), (18,16), (17,1), (19,17), (6,2),
(7,6), (8,7), (3,2), (4,3), (5,4), (10,9),
(11, 10), (12, 11), (25, 12), (23, 12), (24, 23), (13,9),
(14, 13), (15, 14), (22, 15), (20, 15), (21, 20)]
self.inward = [(i - 1, j - 1) for (i, j) in inward_ori_index]
self.outward = [(j, i) for (i, j) in self.inward]
self.neighbor = self.inward + self.outward
elif layout == 'PKU-subject' or layout == 'PKU-view':
self.num_node = 25
self.self_link = [(i, i) for i in range(self.num_node)]
self.inward = [(12, 0), (13, 12), (14, 13), (15, 14), (16, 0), (17, 16),
(18, 17), (19, 18), (1, 0), (20, 1), (2, 20), (3, 2), (4,20),
(5,4), (6,5), (7,6), (21,7), (22,6), (8,20), (9,8), (10, 9),
(11,10), (24,10), (23,11)]
self.outward = [(j, i) for (i, j) in self.inward]
self.neighbor = self.inward + self.outward
elif layout == 'LARA':
self.num_node = 19
self.self_link = [(i, i) for i in range(self.num_node)]
self.inward = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 0), (6, 5), (7, 6), (8, 7), (9, 0), (10, 9), (11, 9), (12,10), (13,12), (14,13), (15,9), (16,15), (17,16), (18,17)]
self.outward = [(j, i) for (i, j) in self.inward]
self.neighbor = self.inward + self.outward
else:
raise ValueError("Do Not Exist This Layout.")
def get_adjacency_matrix(self, labeling_mode=None):
if labeling_mode is None:
return self.A
if labeling_mode == 'spatial':
A = tools.get_spatial_graph(self.num_node, self.self_link, self.inward, self.outward)
else:
raise ValueError()
return A
# Path: libs/models/graph/tools.py
def k_adjacency(A, k, with_self=False, self_factor=1):
assert isinstance(A, np.ndarray)
I = np.eye(len(A), dtype=A.dtype)
if k == 0:
return I
Ak = np.minimum(np.linalg.matrix_power(A + I, k), 1) \
- np.minimum(np.linalg.matrix_power(A + I, k - 1), 1)
if with_self:
Ak += (self_factor * I)
return Ak
# Path: libs/models/graph/tools.py
def normalize_adjacency_matrix(A):
node_degrees = A.sum(-1)
degs_inv_sqrt = np.power(node_degrees, -0.5)
norm_degs_matrix = np.eye(len(node_degrees)) * degs_inv_sqrt
return (norm_degs_matrix @ A @ norm_degs_matrix).astype(np.float32)
# Path: libs/models/graph/tools.py
def get_adjacency_matrix(edges, num_nodes=25):
A = np.zeros((num_nodes, num_nodes), dtype=np.float32)
for edge in edges:
A[edge] = 1.
return A
# Path: libs/models/SP.py
import torch
import torch.nn as nn
import numpy as np
from .graph.graph import Graph
from .graph.tools import k_adjacency, normalize_adjacency_matrix, get_adjacency_matrix
class MultiScale_GraphConv(nn.Module):
def __init__(self,
num_scales, # 13
in_channels,
out_channels,
dataset,
disentangled_agg=True,
use_mask=True,
dropout=0,
activation='relu'):
super().__init__()
| self.graph = Graph(labeling_mode='spatial', layout=dataset) |
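A toy illustration of the k_adjacency and normalize_adjacency_matrix helpers above on a made-up 4-node chain graph:

import numpy as np

A = np.zeros((4, 4), dtype=np.float32)
for i, j in [(0, 1), (1, 2), (2, 3)]:  # undirected chain 0-1-2-3
    A[i, j] = A[j, i] = 1.0

I = np.eye(4, dtype=A.dtype)
# A1 keeps direct neighbours only; A2 keeps nodes first reachable in exactly two hops
A1 = np.minimum(np.linalg.matrix_power(A + I, 1), 1) - np.minimum(np.linalg.matrix_power(A + I, 0), 1)
A2 = np.minimum(np.linalg.matrix_power(A + I, 2), 1) - np.minimum(np.linalg.matrix_power(A + I, 1), 1)

# symmetric D^-1/2 A D^-1/2 normalization, as in normalize_adjacency_matrix
deg_inv_sqrt = np.power(A1.sum(-1).clip(min=1), -0.5)
D = np.eye(4) * deg_inv_sqrt
A1_norm = D @ A1 @ D
print(A2)
print(A1_norm)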
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: soCzech/GenHowTo
# Path: genhowto_utils.py
def load_genhowto_model(weights_path, device="cpu"):
with open(os.path.join(weights_path, "GenHowTo_controlnet_config.json")) as file:
gef_controlnet_config = json.load(file)
controlnet = ControlNetModel.from_config(gef_controlnet_config, torch_dtype=torch.float32)
# patch forward function of the ControlNet conditioning embedding
controlnet.controlnet_cond_embedding.forward = GenHowTo_ControlNetConditioningEmbedding_forward.__get__(
controlnet.controlnet_cond_embedding, ControlNetConditioningEmbedding)
# load weights for the ControlNet
controlnet.load_state_dict(torch.load(os.path.join(weights_path, "GenHowTo_controlnet.pth"), map_location="cpu"))
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"stabilityai/stable-diffusion-2", controlnet=controlnet, torch_dtype=torch.float32)
# load our fine-tuned weights for the UNet
pipe.unet.load_state_dict(torch.load(os.path.join(weights_path, "GenHowTo_sdunet.pth"), map_location="cpu"))
# change image preprocessor to our custom one which uses VAE to preprocess input images
pipe.control_image_processor = GenHowToControlImagePreprocessor(pipe)
# our model is trained to predict noise directly - we do not use "v_prediction" used by stabilityai/stable-diffusion-2
pipe.scheduler.config.prediction_type = "epsilon"
pipe.scheduler.config["prediction_type"] = "epsilon"
pipe = pipe.to(device)
if device == "cpu":
return pipe
try:
pipe.enable_xformers_memory_efficient_attention()
except:
print("Failed to enable memory efficient attention, continuing without it.")
return pipe
# Path: genhowto_utils.py
class DDIMSkipScheduler(DDIMScheduler):
@register_to_config
def __init__(self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
clip_sample: bool = True,
set_alpha_to_one: bool = True,
steps_offset: int = 0,
prediction_type: str = "epsilon",
thresholding: bool = False,
dynamic_thresholding_ratio: float = 0.995,
clip_sample_range: float = 1.0,
sample_max_value: float = 1.0,
timestep_spacing: str = "leading",
rescale_betas_zero_snr: bool = False):
super().__init__(
num_train_timesteps,
beta_start,
beta_end,
beta_schedule,
trained_betas,
clip_sample,
set_alpha_to_one,
steps_offset,
prediction_type,
thresholding,
dynamic_thresholding_ratio,
clip_sample_range,
sample_max_value,
timestep_spacing,
rescale_betas_zero_snr)
self.num_steps_to_skip = None
def set_num_steps_to_skip(self, num_steps_to_skip: int, num_inference_steps: int):
self.num_steps_to_skip = num_steps_to_skip
self.set_timesteps(num_inference_steps)
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
super().set_timesteps(num_inference_steps, device)
if self.num_steps_to_skip is None:
return
if self.num_steps_to_skip >= num_inference_steps:
raise ValueError(
f"`self.num_steps_to_skip`: {self.num_steps_to_skip} cannot be larger or equal to "
f"`num_inference_steps`: {num_inference_steps}."
)
if self.config.timestep_spacing != "leading":
raise ValueError(
f"`self.config.timestep_spacing`: {self.config.timestep_spacing} must be `leading` "
f"if `num_steps_to_skip` is not None."
)
self.timesteps = self.timesteps[self.num_steps_to_skip:]
# Path: genhowto.py
import os
import math
import torch
import argparse
import numpy as np
from PIL import Image
from genhowto_utils import load_genhowto_model, DDIMSkipScheduler
def main(args):
if os.path.exists(args.output_path):
print(f"{args.output_path} already exists.")
return
pipe = load_genhowto_model(args.weights_path, device=args.device)
pipe.scheduler.set_timesteps(args.num_inference_steps)
if args.num_steps_to_skip is not None: # possibly do not start from complete noise
| pipe.scheduler = DDIMSkipScheduler.from_config(pipe.scheduler.config) |
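A rough numeric illustration of the timestep truncation performed by DDIMSkipScheduler above; the "leading" spacing below approximates diffusers' behaviour (ignoring steps_offset) and the concrete numbers are examples.

import numpy as np

num_train_timesteps, num_inference_steps, num_steps_to_skip = 1000, 50, 10
step_ratio = num_train_timesteps // num_inference_steps
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].astype(np.int64)
timesteps = timesteps[num_steps_to_skip:]  # drop the earliest (noisiest) steps
print(timesteps[:5])  # [780 760 740 720 700]: denoising starts at t=780 instead of t=980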
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bolna-ai/bolna
# Path: bolna/helpers/logger_config.py
def configure_logger(file_name, enabled=True, logging_level='INFO'):
if logging_level not in VALID_LOGGING_LEVELS:
logging_level = "INFO"
logging.basicConfig(
level=logging_level,
format="%(asctime)s.%(msecs)03d %(levelname)s {%(module)s} [%(funcName)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(file_name)
if not enabled:
logger.disabled = True
return logger
# Path: bolna/constants.py
PREPROCESS_DIR = 'agent_data'
# Path: bolna/helpers/utils.py
import json
import asyncio
import re
import numpy as np
import copy
import hashlib
import os
import traceback
import ast
from botocore.exceptions import BotoCoreError, ClientError
from aiobotocore.session import AioSession
from contextlib import AsyncExitStack
from dotenv import load_dotenv
from pydantic import BaseModel, create_model
from .logger_config import configure_logger
from bolna.constants import PREPROCESS_DIR
logger = configure_logger(__name__)
load_dotenv()
BUCKET_NAME = os.getenv('BUCKET_NAME')
def load_file(file_path, is_json=False):
data = None
with open(file_path, "r") as f:
if is_json:
data = json.load(f)
else:
data = f.read()
return data
def write_json_file(file_path, data):
with open(file_path, 'w') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
def create_ws_data_packet(data, meta_info=None, is_md5_hash=False, llm_generated=False):
metadata = copy.deepcopy(meta_info)
if meta_info is not None: #It'll be none in case we connect through dashboard playground
metadata["is_md5_hash"] = is_md5_hash
metadata["llm_generated"] = llm_generated
return {
'data': data,
'meta_info': metadata
}
def int2float(sound):
abs_max = np.abs(sound).max()
sound = sound.astype('float32')
if abs_max > 0:
sound *= 1 / 32768
sound = sound.squeeze() # depends on the use case
return sound
def float2int(sound):
sound = np.int16(sound * 32767)
return sound
def mu_law_encode(audio, quantization_channels=256):
mu = quantization_channels - 1
safe_audio_abs = np.minimum(np.abs(audio), 1.0)
magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)
signal = np.sign(audio) * magnitude
return ((signal + 1) / 2 * mu + 0.5).astype(np.int32)
def raw_to_mulaw(raw_bytes):
# Convert bytes to numpy array of int16 values
samples = np.frombuffer(raw_bytes, dtype=np.int16)
samples = samples.astype(np.float32) / (2 ** 15)
mulaw_encoded = mu_law_encode(samples)
return mulaw_encoded
async def get_s3_file(bucket_name, file_key):
session = AioSession()
async with AsyncExitStack() as exit_stack:
s3_client = await exit_stack.enter_async_context(session.create_client('s3'))
try:
response = await s3_client.get_object(Bucket=bucket_name, Key=file_key)
except (BotoCoreError, ClientError) as error:
logger.error(error)
else:
file_content = await response['Body'].read()
return file_content
async def put_s3_file(bucket_name, file_key, file_data, content_type):
session = AioSession()
async with AsyncExitStack() as exit_stack:
s3_client = await exit_stack.enter_async_context(session.create_client('s3'))
data = None
if content_type == "json":
data = json.dumps(file_data)
elif content_type in ["mp3", "wav", "pcm"]:
data = file_data
try:
await s3_client.put_object(Bucket=bucket_name, Key=file_key, Body=data)
except (BotoCoreError, ClientError) as error:
logger.error(error)
except Exception as e:
logger.error('Exception occurred while s3 put object: {}'.format(e))
async def get_raw_audio_bytes_from_base64(agent_name, b64_string, audio_format='mp3', user_id = None, assistant_id=None, local = False):
# we are already storing pcm formatted audio in the filler config. No need to encode/decode them further
audio_data = None
if local:
| file_name = f"{PREPROCESS_DIR}/{agent_name}/{audio_format}/{b64_string}.{audio_format}" |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: relari-ai/continuous-eval
# Path: continuous_eval/llm_factory.py
GOOGLE_GENAI_AVAILABLE = True
GOOGLE_GENAI_AVAILABLE = False
ANTHROPIC_AVAILABLE = True
ANTHROPIC_AVAILABLE = False
class LLMInterface(ABC):
class LLMFactory(LLMInterface):
def run(self, prompt, temperature=0):
def __init__(self, model):
def _llm_response(self, prompt, temperature):
def run(self, prompt, temperature=0):
# Path: continuous_eval/metrics/base.py
class LLMBasedMetric(Metric):
"""
Base class for all LLM based metrics.
"""
def __init__(self, model: LLMInterface = DefaultLLM):
super().__init__()
assert isinstance(model, LLMInterface), "model must be an instance of LLMInterface."
self._llm = model
# Path: continuous_eval/metrics/retrieval_LLM_based_metrics.py
class LLMBasedContextCoverage(LLMBasedMetric):
def __init__(self, model: LLMInterface = DefaultLLM, use_few_shot: bool = True):
super().__init__(model)
self.use_few_shot = use_few_shot
def __str__(self):
return f"LLMBasedContextCoverage(model={self.model}, use_few_shot={self.use_few_shot})"
def calculate(self, question, retrieved_contexts, answer, **kwargs):
"""
Calculate the context coverage score for the given datapoint.
"""
context = "\n".join(retrieved_contexts)
few_shot_prompt = (
"""Example:
question: What are the main characteristics of Jupiter?
context: Jupiter is the fifth planet from the Sun and the largest in the Solar System. It is a gas giant with a mass more than two and a half times that of all the other planets in the Solar System combined, but less than one-thousandth the mass of the Sun. Jupiter is known for its prominent Great Red Spot, a giant storm larger than Earth that has been ongoing for hundreds of years.
answer: Jupiter is the largest planet in our Solar System and has a giant storm known as the Great Red Spot.
classification:
[
{{
"statement_1":"Jupiter is the largest planet in the Solar System.",
"reason": "This is directly stated in the context.",
"Attributed": 1
}},
{{
"statement_2":"Jupiter is closer to the Sun than Earth.",
"reason": "The context contradicts this, stating Jupiter is the fifth planet from the Sun, while Earth is the third.",
"Attributed": 0
}}
]"""
if self.use_few_shot
else ""
)
prompt = {
"system_prompt": (
"""
Given a question, context, and answer, analyze each statement in the answer and classify if the statement can be attributed to the given context or not. Output JSON strictly in the following format.
"""
+ few_shot_prompt
),
"user_prompt": ("question: " + question + "\ncontext: " + context + "\nanswer: " + answer),
}
content = self._llm.run(prompt)
try:
coverage = self.extract_attributed_from_broken_json(content)
except Exception as e:
print(f"{type(e).__name__} Error: {content}, skipping")
return {
"LLM_based_context_coverage": None,
"LLM_based_context_statements": content,
}
return {
"LLM_based_context_coverage": coverage,
"LLM_based_context_statements": content,
}
@staticmethod
def extract_attributed_from_broken_json(statements):
pattern = r'"Attributed":\s*(\d+)'
attributed_numbers = re.findall(pattern, statements, re.IGNORECASE)
try:
attributed_numbers = [int(num) for group in attributed_numbers for num in group if num]
except Exception as e:
print(f"{type(e).__name__} Error: {attributed_numbers}, skipping")
return None
coverage = sum(attributed_numbers) / len(attributed_numbers) if attributed_numbers else None
return coverage
# Path: continuous_eval/metrics/generation_LLM_based_metrics.py
from continuous_eval.llm_factory import DefaultLLM, LLMInterface
from continuous_eval.metrics.base import LLMBasedMetric
from continuous_eval.metrics.retrieval_LLM_based_metrics import LLMBasedContextCoverage
class LLMBasedFaithfulness(LLMBasedMetric):
"""
The LLM based faithfulness metric.
Measures whether the generated answer is faithful to the retrieved context.
"""
def __init__(
self,
model: LLMInterface = DefaultLLM,
use_few_shot: bool = True,
classify_by_statement: bool = False,
):
super().__init__(model)
self.use_few_shot = use_few_shot
self.classify_by_statement = classify_by_statement
def __str__(self):
return f"LLMBasedFaithfulness(model={self.model}, use_few_shot={self.use_few_shot}, classify_by_statement={self.classify_by_statement})"
def calculate(self, question, retrieved_contexts, answer, **kwargs):
"""
Calculate the faithfulness score for the given datapoint.
"""
if self.classify_by_statement:
# Context coverage uses the same prompt as faithfulness because it calculates what proportion of statements in the answer can be attributed to the context.
# The difference is that faithfulness uses the generated answer, while context coverage uses ground truth answer (to evaluate context).
| context_coverage = LLMBasedContextCoverage(use_few_shot=self.use_few_shot) |
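A simplified worked example of the "Attributed" extraction pattern used by extract_attributed_from_broken_json above; the content string is a fabricated, deliberately truncated LLM output.

import re

content = '[{"statement_1": "...", "Attributed": 1}, {"statement_2": "...", "Attributed": 0}'
# the regex recovers the per-statement flags even though the JSON is incomplete
flags = [int(n) for n in re.findall(r'"Attributed":\s*(\d+)', content, re.IGNORECASE)]
coverage = sum(flags) / len(flags) if flags else None
print(coverage)  # 0.5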
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ryanhe312/STSSNet-AAAI2024
# Path: utils/matlab_metric.py
def rgb2ycbcr(img, only_y=True):
def calc_metrics(img1, img2, crop_border, test_Y=True, norm=False, mask=None):
def calc_metrics_y(img1, img2, crop_border, test_Y=True):
def calc_psnr(img1, img2, mask=None):
def ssim(img1, img2, mask=None):
def calc_ssim(img1, img2, mask=None):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
# Path: utils/metrics.py
class cvtColor:
def __init__(self) -> None:
def rgb2ycbcr(self, tensor):
def ycrcb2rgb(self, tensor):
def accuracy(output, target):
def top_k_acc(output, target, k=3):
def mse(output, target):
def psnr(output, target, only_y=False):
def ssim(output, target, only_y=False):
R = tensor[:,0:1]
G = tensor[:,1:2]
B = tensor[:,2:3]
Y = self.rgb2ycbcr_coeffs[0] * R + self.rgb2ycbcr_coeffs[1] * G + self.rgb2ycbcr_coeffs[2] * B + self.rgb2ycbcr_coeffs[3]
Y = tensor[:,0:1]
R = self.ycbcr2rgb_coeffs[0] * Y + self.ycbcr2rgb_coeffs[1] * Cb + self.ycbcr2rgb_coeffs[2] * Cr + self.ycbcr2rgb_coeffs[3]
G = self.ycbcr2rgb_coeffs[4] * Y + self.ycbcr2rgb_coeffs[5] * Cb + self.ycbcr2rgb_coeffs[6] * Cr + self.ycbcr2rgb_coeffs[7]
B = self.ycbcr2rgb_coeffs[8] * Y + self.ycbcr2rgb_coeffs[9] * Cb + self.ycbcr2rgb_coeffs[10] * Cr + self.ycbcr2rgb_coeffs[11]
# Path: eval.py
import os
import cv2
import lpips
import torch
import numpy as np
import torch.nn.functional as F
import torch.utils.data as data
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import matlab_metric, metrics
from dataloaders import *
from model import STSSNet
def ImgWrite(mPath,prefix,idx,img):
cv2.imwrite(os.path.join(mPath,prefix+"."+str(idx).zfill(4)+".png"),img)
@torch.no_grad()
def save_res(dataLoaderIns, model, modelPath, save_dir, save_img=True, mode='all'):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if modelPath.endswith(".tar"):
model_CKPT = torch.load(modelPath, map_location="cuda:0")["state_dict"]
elif modelPath.endswith(".ckpt"):
model_CKPT = {k[6:]:v for k,v in torch.load(modelPath, map_location="cuda:0")["state_dict"].items() if 'vgg' not in k}
else:
model_CKPT = torch.load(modelPath, map_location="cuda:0")
model.load_state_dict(model_CKPT)
model = model.to("cuda:0")
model.eval()
all_PSNR_SF = []
all_ssim_SF = []
all_lpips_SF = []
all_PSNR_IF = []
all_ssim_IF = []
all_lpips_IF = []
loss_fn_alex = lpips.LPIPS(net='alex').cuda()
print('saving to ',save_dir)
f = open(os.path.join(save_dir, 'metrics.csv'), 'w')
print('frame,psnr,ssim,lpips', file=f)
for index, (input,features,mask,hisBuffer,label) in tqdm(dataLoaderIns):
index = index[0].item()
input=input.cuda()
hisBuffer=hisBuffer.cuda()
mask=mask.cuda()
features=features.cuda()
label=label.cuda()
B,C,H,W = input.size()
input = F.pad(input,(0,0,0,4),'replicate')
mask = F.pad(mask,(0,0,0,4),'replicate')
features = F.pad(features,(0,0,0,4),'replicate')
hisBuffer = F.pad(hisBuffer.reshape(B,-1,H,W),(0,0,0,4),'replicate').reshape(B,3,4,H+4,W)
res=model(input, features, mask, hisBuffer)
res = res[:,:,:-8]
## mask
if mode == 'edge':
gray = cv2.cvtColor((label[0].permute(1,2,0).detach().cpu().numpy() * 255).astype(np.uint8), cv2.COLOR_RGB2GRAY)
mask = cv2.Canny(gray, 100, 200)
elif mode == 'hole':
mask = 1 - mask[:, :, :-4]
mask = F.interpolate(mask, scale_factor=2, mode='bilinear').squeeze().cpu().numpy()
else:
mask = None
## calculate metrics
| psnr, ssim = matlab_metric.calc_metrics(res[0].permute(1,2,0).detach().cpu().numpy(), label[0].permute(1,2,0).detach().cpu().numpy(), 0, norm=True, mask=mask) |
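A minimal PSNR sketch corresponding to what calc_psnr above computes for images in [0, 1]; the repo's calc_metrics additionally handles Y-channel conversion, border cropping and masks, which are omitted here.

import numpy as np

def psnr(img1, img2, max_val=1.0):
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10 * np.log10(max_val ** 2 / mse)

print(psnr(np.ones((8, 8, 3)) * 0.5, np.ones((8, 8, 3)) * 0.45))  # ~26.02 dB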
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Seunggu0305/VLCounter
# Path: tools/models/Encoder_utils.py
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
# Path: tools/models/Encoder_utils.py
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for i in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
# ADDED
def forward_attention(self, x: torch.Tensor):
for index, layer in enumerate(self.resblocks):
if index == len(self.resblocks) - 1:
return layer(x, return_attention=True)
x = layer(x)
# Path: tools/models/Encoder_utils.py
class Attention(nn.Module):
def __init__(self, out_dim, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., settings=''):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(out_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.settings = settings
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
# original self-attention for the original path
attn_ori = (q @ k.transpose(-2, -1)) * self.scale
attn_ori = attn_ori.softmax(dim=-1)
attn_ori = self.attn_drop(attn_ori)
# replace k & q by v
k = v
q = k
# resnets have only one self-attention, norm and larger scale perform better
if self.settings == 'resnet':
k = k / (k.norm(p=2, dim=-1, keepdim=True) + 1e-6)
q = k
scale = self.scale * 8
else:
scale = self.scale
# self-attention, higher temperate for resnets performs better
attn = (q @ k.transpose(-2, -1)) * scale
attn = (attn).softmax(dim=-1)
attn = self.attn_drop(attn)
x_ori = (attn_ori @ v).transpose(1, 2).reshape(B, N, C)
# x = (attn @ v).transpose(1, 2).reshape(B, N, C) # clip_surgery
x = v.transpose(1, 2).reshape(B, N, C) # mask_clip
x = self.proj_drop(self.proj(x))
x_ori = self.proj_drop(self.proj(x_ori))
return [x, x_ori]
# Path: tools/models/ViT_Encoder_add.py
import torch
import torch.nn.functional as F
import math
from torch.nn import Dropout
from torch import nn
from functools import reduce
from operator import mul
from .Encoder_utils import LayerNorm, Transformer, Attention
class SPTCLIPVisionTransformer(nn.Module):
def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[5,6,7,8,11], pretrained=None, get_embeddings=True,
num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):
super().__init__()
self.pretrained = pretrained
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.spatial_size = input_resolution // patch_size
| self.ln_pre = LayerNorm(width) |
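A quick arithmetic check of the token count implied by the constructor defaults above (384x384 input, 16x16 patches, width 768):

input_resolution, patch_size, width = 384, 16, 768
num_tokens = (input_resolution // patch_size) ** 2 + 1  # 24*24 patch tokens plus 1 class token
print(num_tokens, width)  # 577 768 -> positional_embedding has shape (577, 768)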
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: qitan/devops-backend-lite
# Path: dbapp/models.py
# Path: common/recursive.py
class RecursiveField(Field):
"""
A field that gets its representation from its parent.
This method could be used to serialize a tree structure, a linked list, or
even a directed acyclic graph. As with all recursive things, it is
important to keep the base case in mind. In the case of the tree serializer
example below, the base case is a node with an empty list of children. In
the case of the list serializer below, the base case is when `next==None`.
Above all, beware of cyclical references.
Examples:
class TreeSerializer(self):
children = ListField(child=RecursiveField())
class ListSerializer(self):
next = RecursiveField(allow_null=True)
"""
# This list of attributes determined by the attributes that
# `rest_framework.serializers` calls to on a field object
PROXIED_ATTRS = (
# methods
'get_value',
'get_initial',
'run_validation',
'get_attribute',
'to_representation',
# attributes
'field_name',
'source',
'read_only',
'default',
'source_attrs',
'write_only',
)
def __init__(self, to=None, **kwargs):
"""
arguments:
to - `None`, the name of another serializer defined in the same module
as this serializer, or the fully qualified import path to another
serializer. e.g. `ExampleSerializer` or
`path.to.module.ExampleSerializer`
"""
self.to = to
self.init_kwargs = kwargs
self._proxied = None
# need to call super-constructor to support ModelSerializer
super_kwargs = dict(
(key, kwargs[key])
for key in kwargs
if key in _signature_parameters(Field.__init__)
)
super(RecursiveField, self).__init__(**super_kwargs)
def bind(self, field_name, parent):
# Extra-lazy binding, because when we are nested in a ListField, the
# RecursiveField will be bound before the ListField is bound
self.bind_args = (field_name, parent)
@property
def proxied(self):
if not self._proxied:
if self.bind_args:
field_name, parent = self.bind_args
if hasattr(parent, 'child') and parent.child is self:
# RecursiveField nested inside of a ListField
parent_class = parent.parent.__class__
else:
# RecursiveField directly inside a Serializer
parent_class = parent.__class__
assert issubclass(parent_class, BaseSerializer)
if self.to is None:
proxied_class = parent_class
else:
try:
module_name, class_name = self.to.rsplit('.', 1)
except ValueError:
module_name, class_name = parent_class.__module__, self.to
try:
proxied_class = getattr(
importlib.import_module(module_name), class_name)
except Exception as e:
raise ImportError(
'could not locate serializer %s' % self.to, e)
# Create a new serializer instance and proxy it
proxied = proxied_class(**self.init_kwargs)
proxied.bind(field_name, parent)
self._proxied = proxied
return self._proxied
def __getattribute__(self, name):
if name in RecursiveField.PROXIED_ATTRS:
try:
proxied = object.__getattribute__(self, 'proxied')
return getattr(proxied, name)
except AttributeError:
pass
return object.__getattribute__(self, name)
# Path: dbapp/models.py
# Path: dbapp/models.py
# Path: common/extends/serializers.py
class ModelSerializer(BaseModelSerializer):
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = self._readable_fields
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
# We skip `to_representation` for `None` values so that fields do
# not have to explicitly deal with that case.
#
# For related fields with `use_pk_only_optimization` we need to
# resolve the pk value.
check_for_none = attribute.pk if isinstance(
attribute, PKOnlyObject) else attribute
if check_for_none is None:
ret[field.field_name] = None
else:
if field.field_name == 'name':
try:
ret[field.field_name] = field.to_representation(
attribute).lower()
except:
ret[field.field_name] = field.to_representation(
attribute)
else:
ret[field.field_name] = field.to_representation(attribute)
return ret
# Path: apps/workflow/serializers.py
from rest_framework import serializers
from dbapp.models import Product, Project
from common.recursive import RecursiveField
from dbapp.models import UserProfile
from dbapp.models import WorkflowCategory, Workflow, WorkflowNodeHistory, WorkflowTemplate, \
WorkflowTemplateRevisionHistory, WorkflowNodeHistoryCallback
from common.extends.serializers import ModelSerializer
from django.conf import settings
import logging
"""
@Author : Ken Chen
@Contact : 316084217@qq.com
@Time : 2021/11/2 9:50 AM
"""
logger = logging.getLogger(__name__)
class WorkflowTemplateSerializer(ModelSerializer):
projects_info = serializers.SerializerMethodField()
env_info = serializers.SerializerMethodField()
def get_env_info(self, instance):
if instance.environment:
return {'name': instance.environment.name, 'alias': instance.environment.alias}
return {}
def get_projects_info(self, instance):
data = []
product_ids = {}
for i in instance.projects:
if i[0] not in product_ids:
product_ids[i[0]] = []
product_ids[i[0]].append(i[1])
for k, v in product_ids.items():
product = Product.objects.get(id=k)
_projects = Project.objects.filter(id__in=v)
data.append({'value': product.id, 'name': product.name, 'label': product.alias,
'children': [{'value': i.id, 'name': i.name, 'label': i.alias} for i in _projects]})
return data
class Meta:
model = WorkflowTemplate
fields = '__all__'
class WorkflowTemplateForRetrieveSerializer(ModelSerializer):
class Meta:
model = WorkflowTemplate
fields = '__all__'
class WorkflowRevisionTemplateSerializer(ModelSerializer):
class Meta:
| model = WorkflowTemplateRevisionHistory |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: timo-reymann/python-oauth2-cli-auth
# Path: oauth2_cli_auth/http_server.py
class OAuthCallbackHttpServer(HTTPServer):
"""
Simplistic HTTP Server to provide local callback URL for oauth2 provider
"""
def __init__(self, port):
super().__init__(("", port), OAuthRedirectHandler)
self._code = None
def get_code(self):
return self._code
@property
def callback_url(self):
return f"http://localhost:{self.server_port}"
def wait_for_code(self, attempts: int = 3, timeout_per_attempt=10) -> Optional[int]:
"""
Wait for the server to open the callback page containing the code query parameter.
It tries for #attempts attempts with a timeout of #timeout_per_attempt for each attempt.
This prevents the CLI from getting stuck on unresolved callback URLs.
:param attempts: Amount of attempts
:param timeout_per_attempt: Timeout for each attempt to be successful
:return: Code from callback page or None if the callback page is not called successfully
"""
for i in range(0, attempts):
try:
_method_with_timeout(self.handle_request, timeout_seconds=timeout_per_attempt)
except TimeoutException:
continue
if self.get_code() is not None:
return self.get_code()
return None
# Path: oauth2_cli_auth/code_grant.py
class OAuth2ClientInfo:
"""
Metadata for Oauth2 client
"""
authorization_url: str
"""Authorization URL to redirect the user to"""
token_url: str
"""Token URL for fetching the access token"""
client_id: str
"""Id of the client to request for"""
scopes: list[str]
"""List of scopes to request"""
@staticmethod
def from_oidc_endpoint(oidc_config_endpoint: str, client_id: str, scopes: list[str]):
config = load_oidc_config(oidc_config_endpoint)
return OAuth2ClientInfo(
authorization_url=config.get("authorization_endpoint"),
token_url=config.get("token_endpoint"),
client_id=client_id,
scopes=scopes,
)
# Path: oauth2_cli_auth/code_grant.py
def exchange_code_for_access_token(client_info: OAuth2ClientInfo, redirect_uri: str, code: str,
access_token_field: str = "access_token") -> str:
"""
Exchange a code for an access token using the endpoints from client info
:param client_info: Info about oauth2 client
:param redirect_uri: Callback URL
:param code: Code to redeem
:param access_token_field: Name of the field containing the access token to use. This might differ depending on
the provider you are using. For example for Auth0 you have to set this to id_token
:return: Extracted access token from response
"""
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Basic " + base64.b64encode(f"{client_info.client_id}:".encode()).decode(),
}
data = {
"code": code,
"redirect_uri": redirect_uri,
"grant_type": "authorization_code",
}
encoded_data = urllib.parse.urlencode(data).encode('utf-8')
request = urllib.request.Request(client_info.token_url, data=encoded_data, headers=headers)
json_response = _load_json(request)
return json_response.get(access_token_field)
# Path: oauth2_cli_auth/code_grant.py
def get_auth_url(client_info: OAuth2ClientInfo, redirect_uri: str) -> str:
"""
Build authorization url for browser
:param client_info: Info about oauth2 client
:param redirect_uri: Callback URL
:return: Ready to use URL
"""
return (f"{client_info.authorization_url}"
f"?client_id={client_info.client_id}"
f"&redirect_uri={redirect_uri}"
f"&scope={' '.join(client_info.scopes)}"
f"&response_type=code")
# Path: oauth2_cli_auth/code_grant.py
def open_browser(url: str) -> None:
"""
Open browser using webbrowser module and show message about URL open
:param url: URL to open and display
:return: None
"""
print(f"Open your browser at\n{url}")
webbrowser.open(url)
# Path: oauth2_cli_auth/simplified_flow.py
from oauth2_cli_auth import OAuthCallbackHttpServer, get_auth_url, exchange_code_for_access_token, OAuth2ClientInfo, \
open_browser
def get_access_token_with_browser_open(client_info: OAuth2ClientInfo, server_port: int = 8080) -> str:
"""
Provides a simplified API to:
- Spin up the callback server
- Open the browser with the authorization URL
- Wait for the code to arrive
- Get access token from code
:param client_info: Client Info for Oauth2 Interaction
:param server_port: Port of the local web server to spin up
:return: Access Token
"""
callback_server = OAuthCallbackHttpServer(server_port)
auth_url = get_auth_url(client_info, callback_server.callback_url)
| open_browser(auth_url) |
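A hypothetical usage sketch built only from the helpers shown above; the OIDC endpoint, client id, scopes and port are placeholders, and the top-level import assumes the package re-exports get_access_token_with_browser_open.

from oauth2_cli_auth import OAuth2ClientInfo, get_access_token_with_browser_open

# placeholder issuer configuration and client id, not real values
client_info = OAuth2ClientInfo.from_oidc_endpoint(
    "https://example.com/.well-known/openid-configuration",
    client_id="my-cli-client",
    scopes=["openid", "profile"],
)
token = get_access_token_with_browser_open(client_info, server_port=8080)
print(token)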
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: solanav/phishflood
# Path: credfind/utils.py
def extract_inputs(html: str) -> InputList:
"""Given an HTML page, returns a list of inputs or None if nothing was found"""
soup = BeautifulSoup(html, "html.parser")
print("Finding all forms in the page")
forms = soup.find_all("form")
print(f"Found {len(forms)} forms")
if len(forms) == 0:
return []
inputs = []
for fmid, f in enumerate(forms):
form = Form.from_tag(f, fmid)
form_inputs = [
Input.from_tag(tag, imid) for imid, tag in enumerate(f.find_all("input"))
]
fi = count_fillable_inputs(form_inputs)
inputs.append((fi, form, form_inputs))
print(f"Found {len(form_inputs)} inputs inside form")
if len(forms) == 0:
return []
elif len(inputs) > 1:
inputs.sort(key=lambda x: x[0], reverse=True)
return inputs
# Path: credfind/objects.py
class InputType(Enum):
class Method(Enum):
class Form:
class Input:
BUTTON = "button"
CHECKBOX = "checkbox"
COLOR = "color"
DATE = "date"
DATETIMELOCAL = "datetime-local"
EMAIL = "email"
FILE = "file"
HIDDEN = "hidden"
IMAGE = "image"
MONTH = "month"
NUMBER = "number"
PASSWORD = "password"
RADIO = "radio"
RANGE = "range"
RESET = "reset"
SEARCH = "search"
SUBMIT = "submit"
TEL = "tel"
TEXT = "text"
TIME = "time"
URL = "url"
WEEK = "week"
GET = "get"
POST = "post"
NONE = "none"
def from_str(cls, s: str) -> Self:
def from_str(cls, s: str) -> Self:
def from_tag(cls, tag: Tag, meta_id: int) -> Self:
def __str__(self) -> str:
def to_dict(self) -> Dict[str, Any]:
def from_tag(cls, tag: Tag, meta_id: int) -> Self:
def __str__(self) -> str:
def to_dict(self) -> Dict[str, Any]:
# Path: credgen/utils.py
def creds_from_input(inp: Input) -> str:
# Check by keywords
text_fields = [inp.name, inp.id_, inp.placeholder]
if in_any(["email"], text_fields):
return fake_email()
elif in_any(["code", "key", "pin"], text_fields):
return fake_number(6)
elif in_any(["password"], text_fields):
return fake_password()
elif in_any(["user", "uid"], text_fields):
return fake_username()
elif in_any(["document", "dni"], text_fields):
return fake_dni()
# Check by basic type
match inp.type_:
case InputType.EMAIL:
return fake_email()
case InputType.PASSWORD:
return fake_password()
case InputType.TEL:
return fake_number(12)
return fake_letters(10)
# Path: phishflood/rabbit.py
class RabbitConsumer(object):
def __init__(self, callback):
self._callback = callback
self._reconnect_delay = 0
self._consumer = RawConsumer(self._callback)
def run(self):
while True:
try:
self._consumer.run()
except KeyboardInterrupt:
self._consumer.stop()
break
self._maybe_reconnect()
def _maybe_reconnect(self):
if self._consumer.should_reconnect:
self._consumer.stop()
reconnect_delay = self._get_reconnect_delay()
logger.info("Reconnecting after %d seconds", reconnect_delay)
time.sleep(reconnect_delay)
self._consumer = RawConsumer(self._callback)
def _get_reconnect_delay(self):
if self._consumer.was_consuming:
self._reconnect_delay = 0
else:
self._reconnect_delay += 1
if self._reconnect_delay > 30:
self._reconnect_delay = 30
return self._reconnect_delay
# Path: config/general_conf.py
API_URL = "http://localhost:8000/api/v1/"
TOKEN = "dff78ca834d84f829bd912662ee5ce86ca771939"
# Path: phishflood/__main__.py
import json
import os
import sys
import time
import requests
from hashlib import sha256
from typing import Any, Dict, List, Optional, Tuple
from credfind.utils import extract_inputs
from credfind.objects import Input, InputList, InputType
from playwright.sync_api import sync_playwright, TimeoutError, Page
from credgen.utils import creds_from_input
from phishflood.rabbit import RabbitConsumer
from config import general_conf
from pprint import pprint; pprint(forms)
SCREENSHOT_I = 0
Actions = List[Dict[str, Any]]
def screenshot(page: Page):
global SCREENSHOT_I
SCREENSHOT_I += 1
page.screenshot(path=f"samples/{SCREENSHOT_I}.png")
def hash_inputs(inputs: List[Input]) -> str:
"""Returns a unique string identifying the inputs in the website"""
return sha256("".join([str(i) for i in inputs]).encode()).hexdigest()
def flood_page(
page: Page, last_hash: str = "", page_num: int = 0
) -> Optional[Tuple[str, InputList, Actions]]:
"""Returns a unique string identifying the inputs in the website"""
# Get a first screenshot
page.wait_for_timeout(3000)
screenshot(page)
# Get html and extract the inputs
try:
html = page.content()
except:
return None
res = extract_inputs(html)
if len(res) > 0:
fi, form, inputs = res[0]
else:
print("No inputs found")
return None
# Calculate the hash of the inputs
input_hash = hash_inputs(inputs)
print(f"Input hash: {input_hash}")
if input_hash == last_hash:
print("Already flooded this page")
return None
form_locator = page.locator(f"form >> nth = {form.meta_id}")
actions = []
# Generate the fake credentials for each form and each input
for inp in inputs:
FILLABLE_INPUTS = [
InputType.TEXT,
InputType.EMAIL,
InputType.PASSWORD,
InputType.NUMBER,
InputType.TEL,
InputType.SEARCH,
InputType.URL,
]
if inp.type_ in FILLABLE_INPUTS:
| text = creds_from_input(inp) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: abing7k/redroid-script
# Path: stuffs/general.py
class General:
def download(self):
loc_md5 = ""
if os.path.isfile(self.dl_file_name):
with open(self.dl_file_name,"rb") as f:
bytes = f.read()
loc_md5 = hashlib.md5(bytes).hexdigest()
while not os.path.isfile(self.dl_file_name) or loc_md5 != self.act_md5:
if os.path.isfile(self.dl_file_name):
os.remove(self.dl_file_name)
print_color("md5 mismatches, redownloading now ....",bcolors.YELLOW)
loc_md5 = download_file(self.dl_link, self.dl_file_name)
def extract(self):
print_color("Extracting archive...", bcolors.GREEN)
print(self.dl_file_name)
print(self.extract_to)
with zipfile.ZipFile(self.dl_file_name) as z:
z.extractall(self.extract_to)
def copy(self):
pass
def install(self):
# pass
self.download()
self.extract()
self.copy()
# Path: tools/helper.py
class bcolors:
RED = '\033[31m'
YELLOW = '\033[33m'
GREEN = '\033[32m'
ENDC = '\033[0m'
# Path: tools/helper.py
def get_download_dir():
download_loc = ""
if os.environ.get("XDG_CACHE_HOME", None) is None:
download_loc = os.path.join('/', "home", os.environ.get("SUDO_USER", os.environ["USER"]), ".cache", "redroid", "downloads")
else:
download_loc = os.path.join(os.environ["XDG_CACHE_HOME"], "redroid", "downloads")
if not os.path.exists(download_loc):
os.makedirs(download_loc)
return download_loc
# Path: tools/helper.py
def print_color(str, color):
print(color+str+bcolors.ENDC)
# Path: tools/helper.py
def run(args):
result = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.stderr:
print(result.stderr.decode("utf-8"))
raise subprocess.CalledProcessError(
returncode = result.returncode,
cmd = result.args,
stderr = result.stderr
)
return result
# Path: stuffs/ndk.py
import os
import shutil
from stuffs.general import General
from tools.helper import bcolors, get_download_dir, print_color, run
class Ndk(General):
download_loc = get_download_dir()
copy_dir = "./ndk"
dl_link = "https://github.com/supremegamers/vendor_google_proprietary_ndk_translation-prebuilt/archive/181d9290a69309511185c4417ba3d890b3caaaa8.zip"
dl_file_name = os.path.join(download_loc, "libndktranslation.zip")
extract_to = "/tmp/libndkunpack"
act_md5 = "0beff55f312492f24d539569d84f5bfb"
# init_rc_component = """
# # Enable native bridge for target executables
# on early-init
# mount binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc
# on property:ro.enable.native.bridge.exec=1
# copy /system/etc/binfmt_misc/arm_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm_dyn /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_dyn /proc/sys/fs/binfmt_misc/register
# """
def download(self):
| print_color("Downloading libndk now .....", bcolors.GREEN) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zvict/papr
# Path: dataset/utils.py
def load_meta_data(args, mode="train"):
"""
0 -----------> W
|
|
|
⬇
H
[H, W, 4]
"""
image_paths = None
if args.type == "synthetic":
images, poses, hwf, image_paths = load_blender_data(
args.path, split=mode, factor=args.factor, read_offline=args.read_offline)
print('Loaded blender', images.shape, hwf, args.path)
H, W, focal = hwf
hwf = [H, W, focal, focal]
if args.white_bg:
images = images[..., :3] * \
images[..., -1:] + (1. - images[..., -1:])
else:
images = images[..., :3]
elif args.type == "t2":
images, poses, hwf, image_paths = load_t2_data(
args.path, factor=args.factor, split=mode, read_offline=args.read_offline)
print('Loaded t2', images.shape, hwf, args.path,
images.min(), images.max(), images[0, 10, 10, :])
if args.white_bg and images.shape[-1] == 4:
images = images[..., :3] * \
images[..., -1:] + (1. - images[..., -1:])
elif not args.white_bg:
images = images[..., :3]
mask = images.sum(-1) == 3.0
images[mask] = 0.
else:
raise ValueError("Unknown dataset type: {}".format(args.type))
H, W, focal_x, focal_y = hwf
images = torch.from_numpy(images).float()
poses = torch.from_numpy(poses).float()
return images, poses, H, W, focal_x, focal_y, image_paths
# Path: dataset/utils.py
def get_rays(H, W, focal_x, focal_y, c2w, fineness=1):
N = c2w.shape[0]
width = torch.linspace(
0, W / focal_x, steps=int(W / fineness) + 1, dtype=torch.float32)
height = torch.linspace(
0, H / focal_y, steps=int(H / fineness) + 1, dtype=torch.float32)
y, x = torch.meshgrid(height, width)
pixel_size_x = width[1] - width[0]
pixel_size_y = height[1] - height[0]
x = (x - W / focal_x / 2 + pixel_size_x / 2)[:-1, :-1]
y = -(y - H / focal_y / 2 + pixel_size_y / 2)[:-1, :-1]
# [H, W, 3], vectors, since the camera is at the origin
dirs_d = torch.stack([x, y, -torch.ones_like(x)], -1)
rays_d = cam_to_world(dirs_d.unsqueeze(0), c2w) # [N, H, W, 3]
rays_o = c2w[:, :3, -1] # [N, 3]
return rays_o, rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
# Path: dataset/utils.py
def extract_patches(imgs, rays_o, rays_d, args):
patch_opt = args.patches
N, H, W, C = imgs.shape
if patch_opt.type == "continuous":
num_patches_H = math.ceil(
(H - patch_opt.overlap) / (patch_opt.height - patch_opt.overlap))
num_patches_W = math.ceil(
(W - patch_opt.overlap) / (patch_opt.width - patch_opt.overlap))
num_patches = num_patches_H * num_patches_W
rayd_patches = np.zeros(
(N, num_patches, patch_opt.height, patch_opt.width, 3), dtype=np.float32)
rayo_patches = np.zeros((N, num_patches, 3), dtype=np.float32)
img_patches = np.zeros(
(N, num_patches, patch_opt.height, patch_opt.width, C), dtype=np.float32)
for i in range(N):
n_patch = 0
for start_height in range(0, H - patch_opt.overlap, patch_opt.height - patch_opt.overlap):
for start_width in range(0, W - patch_opt.overlap, patch_opt.width - patch_opt.overlap):
end_height = min(start_height + patch_opt.height, H)
end_width = min(start_width + patch_opt.width, W)
start_height = end_height - patch_opt.height
start_width = end_width - patch_opt.width
rayd_patches[i, n_patch, :, :] = rays_d[i,
start_height:end_height, start_width:end_width]
rayo_patches[i, n_patch, :] = rays_o[i, :]
img_patches[i, n_patch, :, :] = imgs[i,
start_height:end_height, start_width:end_width]
n_patch += 1
elif patch_opt.type == "random":
num_patches = patch_opt.max_patches
rayd_patches = np.zeros(
(N, num_patches, patch_opt.height, patch_opt.width, 3), dtype=np.float32)
rayo_patches = np.zeros((N, num_patches, 3), dtype=np.float32)
img_patches = np.zeros(
(N, num_patches, patch_opt.height, patch_opt.width, C), dtype=np.float32)
for i in range(N):
for n_patch in range(num_patches):
start_height = np.random.randint(0, H - patch_opt.height)
start_width = np.random.randint(0, W - patch_opt.width)
end_height = start_height + patch_opt.height
end_width = start_width + patch_opt.width
rayd_patches[i, n_patch, :, :] = rays_d[i,
start_height:end_height, start_width:end_width]
rayo_patches[i, n_patch, :] = rays_o[i, :]
img_patches[i, n_patch, :, :] = imgs[i,
start_height:end_height, start_width:end_width]
return img_patches, rayd_patches, rayo_patches, num_patches
# Path: dataset/dataset.py
import torch
import numpy as np
import imageio
from torch.utils.data import Dataset
from PIL import Image
from .utils import load_meta_data, get_rays, extract_patches
class RINDataset(Dataset):
""" Ray Image Normal Dataset """
def __init__(self, args, mode='train'):
self.args = args
| images, c2w, H, W, focal_x, focal_y, image_paths = load_meta_data( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rinnakk/nue-asr
# Path: nue_asr/transcribe.py
@torch.inference_mode()
def transcribe(
model: NueASRModel,
tokenizer: PreTrainedTokenizer,
audio: Union[str, np.ndarray, torch.Tensor],
**decode_options,
) -> ASRResult:
device = model.device
sr = 16000
decode_options.setdefault("do_sample", False)
decode_options.setdefault("num_beams", 1)
decode_options.setdefault("temperature", 1.0)
decode_options.setdefault("top_p", 1.0)
decode_options.setdefault("min_new_tokens", 2)
decode_options.setdefault("max_new_tokens", None)
if isinstance(audio, str):
from librosa import load
audio = load(audio, sr=sr)[0]
if not torch.is_tensor(audio):
audio = torch.from_numpy(audio)
if audio.dim() != 1:
assert audio.dim() == 2 and audio.shape[0] == 1, "Only mono audio is supported."
audio = audio.to(model.dtype).reshape(1, -1)
audio_len_sec = audio.shape[-1] / sr
if decode_options["max_new_tokens"] is None:
decode_options["max_new_tokens"] = int(4 * audio_len_sec + 20 + 0.5)
if audio_len_sec > WARN_TOO_LONG_THRESHOLD:
logger.warning(
f"The input audio is {audio_len_sec:.1f} sec, "
"but such long audio inputs may degrade recognition accuracy. "
"It is recommended to split the audio into shorter segments."
)
prefix_token = tokenizer.encode(
"<s>",
add_special_tokens=False,
return_tensors="pt",
)
postfix_token = tokenizer.encode(
"[SEP]",
add_special_tokens=False,
return_tensors="pt",
)
outputs = model(
prefix_token.to(device),
audio.to(device),
postfix_token.to(device),
pad_token_id=tokenizer.pad_token_id,
bos_token_id=tokenizer.bos_token_id,
eos_token_id=tokenizer.eos_token_id,
**decode_options,
)
output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
return ASRResult(text=output_text)
# Path: nue_asr/utils.py
def load_model(
model_name_or_path: Optional[str] = None,
device: Optional[Union[str, torch.device]] = "cuda",
fp16: bool = True,
use_deepspeed: bool = False,
) -> NueASRModel:
if model_name_or_path is None:
model_name_or_path = DEFAULT_MODEL_NAME
device = torch.device(device)
if device.type == "cpu":
if torch.cuda.is_available():
logging.warning(
"CUDA is available but using CPU. "
"If you want to use CUDA, set `device` to `cuda`."
)
if fp16:
logging.warning("FP16 is not supported on CPU. Using FP32 instead.")
fp16 = False
if use_deepspeed:
logging.warning("DeepSpeed is not supported on CPU. Disabling it.")
use_deepspeed = False
dtype = torch.float16 if fp16 else torch.float32
model = NueASRModel.from_pretrained(model_name_or_path)
model.to(dtype)
if use_deepspeed:
try:
import deepspeed
except ImportError:
raise ImportError(
"DeepSpeed is not installed. Please install it with `pip install deepspeed`."
)
ds_engine = deepspeed.init_inference(
model.llm,
replace_with_kernel_inject=True,
dtype=dtype,
)
for m in ds_engine.modules():
if (
getattr(m, "config", None)
and getattr(m.config, "mlp_after_attn", None) is not None
):
m.config.mlp_after_attn = not model.llm.config.use_parallel_residual
model.llm = ds_engine.module
if device is not None:
model.to(device)
logger.info(f"Finished loading model from {model_name_or_path}")
return model
# Path: nue_asr/utils.py
def load_tokenizer(model_name_or_path: Optional[str] = None):
if model_name_or_path is None:
model_name_or_path = DEFAULT_MODEL_NAME
tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path, use_fast=False, legacy=True
)
return tokenizer
# Path: nue_asr/utils.py
def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Path: nue_asr/utils.py
def str2bool(v: str):
if v.lower() in ("true", "t", "yes", "y", "1"):
return True
if v.lower() in ("false", "f", "no", "n", "0"):
return False
raise ValueError(f"Invalid boolean value: {v}")
# Path: nue_asr/cli.py
import argparse
import os
import torch
from .transcribe import transcribe
from .utils import load_model, load_tokenizer, set_seed, str2bool
#!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def cli_main():
default_device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"audio_files",
nargs="+",
type=str,
help="Audio file paths",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Model name or path",
)
parser.add_argument(
"--device",
type=str,
default=default_device,
help="Device to use for inference.",
)
parser.add_argument(
| "--fp16", type=str2bool, default=True, help="Whether to fp16 inference." |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AdaCheng/EgoThink
# Path: models/instruct_blip/common/registry.py
class Registry:
def register_model(cls, name):
def wrap(model_cls):
def register_processor(cls, name):
def wrap(processor_cls):
def register_lr_scheduler(cls, name):
def wrap(lr_sched_cls):
def register_runner(cls, name):
def wrap(runner_cls):
def register_path(cls, name, path):
def register(cls, name, obj):
def get_builder_class(cls, name):
def get_model_class(cls, name):
def get_task_class(cls, name):
def get_processor_class(cls, name):
def get_lr_scheduler_class(cls, name):
def get_runner_class(cls, name):
def list_runners(cls):
def list_models(cls):
def list_tasks(cls):
def list_processors(cls):
def list_lr_schedulers(cls):
def list_datasets(cls):
def get_path(cls, name):
def get(cls, name, default=None, no_warning=False):
def unregister(cls, name):
# Path: models/instruct_blip/processors/base_processor.py
class BaseProcessor:
def __init__(self):
self.transform = lambda x: x
return
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
return cls()
def build(self, **kwargs):
cfg = OmegaConf.create(kwargs)
return self.from_config(cfg)
# Path: models/instruct_blip/processors/randaugment.py
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
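# Minimal usage sketch for RandomAugment, assuming the default op set accepts
# H x W x 3 uint8 arrays; the random image below is a stand-in for real data.
if __name__ == "__main__":
    fake_img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
    aug = RandomAugment(N=2, M=9, isPIL=False)  # sample 2 ops per call at magnitude 9
    out = aug(fake_img)
    print(type(out), getattr(out, "shape", None))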
# Path: models/instruct_blip/processors/blip_processors.py
import re
from ..common.registry import registry
from .base_processor import BaseProcessor
from .randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
| @registry.register_processor("blip_caption") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TristanBilot/mlx-GCN
# Path: datasets.py
def download_cora():
"""Downloads the cora dataset into a local cora folder."""
url = "https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz"
extract_to = "."
if os.path.exists(os.path.join(extract_to, "cora")):
return
response = requests.get(url, stream=True)
if response.status_code == 200:
file_path = os.path.join(extract_to, url.split("/")[-1])
# Write the file to local disk
with open(file_path, "wb") as file:
file.write(response.raw.read())
# Extract the .tgz file
with tarfile.open(file_path, "r:gz") as tar:
tar.extractall(path=extract_to)
print(f"Cora dataset extracted to {extract_to}")
os.remove(file_path)
# Path: datasets.py
def load_data(config):
"""Loads the Cora graph data into MLX array format."""
print("Loading Cora dataset...")
# Graph nodes
raw_nodes_data = np.genfromtxt(config.nodes_path, dtype="str")
raw_node_ids = raw_nodes_data[:, 0].astype(
"int32"
) # unique identifier of each node
raw_node_labels = raw_nodes_data[:, -1]
labels_enumerated = enumerate_labels(raw_node_labels) # target labels as integers
node_features = sparse.csr_matrix(raw_nodes_data[:, 1:-1], dtype="float32")
# Edges
ids_ordered = {raw_id: order for order, raw_id in enumerate(raw_node_ids)}
raw_edges_data = np.genfromtxt(config.edges_path, dtype="int32")
edges_ordered = np.array(
list(map(ids_ordered.get, raw_edges_data.flatten())), dtype="int32"
).reshape(raw_edges_data.shape)
# Adjacency matrix
adj = sparse.coo_matrix(
(np.ones(edges_ordered.shape[0]), (edges_ordered[:, 0], edges_ordered[:, 1])),
shape=(labels_enumerated.shape[0], labels_enumerated.shape[0]),
dtype=np.float32,
)
# Make the adjacency matrix symmetric
adj = adj + adj.T.multiply(adj.T > adj)
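    # for this 0/1 adjacency the sum adds each missing reverse edge, i.e. the result is max(adj, adj.T)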
adj = normalize_adjacency(adj)
print("Dataset loaded.")
return node_features.toarray(), labels_enumerated, adj.toarray()
# Path: datasets.py
def train_val_test_mask(labels, num_classes):
"""Splits the loaded dataset into train/validation/test sets."""
train_set = list(range(140))
validation_set = list(range(200, 500))
test_set = list(range(500, 1500))
return train_set, validation_set, test_set
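# Minimal end-to-end sketch of the three dataset helpers above; the argparse-style
# config and the cora/cora.content / cora/cora.cites file names are assumptions based
# on the standard Cora layout extracted by download_cora.
if __name__ == "__main__":
    from types import SimpleNamespace

    download_cora()
    cfg = SimpleNamespace(nodes_path="cora/cora.content", edges_path="cora/cora.cites")
    x, y, adj = load_data(cfg)
    train_idx, val_idx, test_idx = train_val_test_mask(y, num_classes=int(y.max()) + 1)
    print(x.shape, adj.shape, len(train_idx), len(val_idx), len(test_idx))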
# Path: main_torch.py
from argparse import ArgumentParser
from time import time
from datasets import download_cora, load_data, train_val_test_mask
import torch
import torch.nn as nn
class GCNLayer(nn.Module):
def __init__(self, x_dim, h_dim, bias=True):
super(GCNLayer, self).__init__()
self.weight = nn.Parameter(torch.FloatTensor(torch.zeros(size=(x_dim, h_dim))))
if bias:
self.bias = nn.Parameter(torch.FloatTensor(torch.zeros(size=(h_dim,))))
else:
self.register_parameter('bias', None)
self.initialize_weights()
def initialize_weights(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.zeros_(self.bias)
def forward(self, x, adj):
x = x @ self.weight
if self.bias is not None:
x += self.bias
return torch.mm(adj, x)
class GCN(nn.Module):
def __init__(self, x_dim, h_dim, out_dim, nb_layers=2, dropout=0.5, bias=True):
super(GCN, self).__init__()
layer_sizes = [x_dim] + [h_dim] * nb_layers + [out_dim]
self.gcn_layers = nn.Sequential(*[
GCNLayer(in_dim, out_dim, bias)
for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:])
])
self.dropout = nn.Dropout(p=dropout)
def initialize_weights(self):
        # re-initialize the parameters of every stacked GCN layer
        for layer in self.gcn_layers:
            layer.initialize_weights()
def forward(self, x, adj):
for layer in self.gcn_layers[:-1]:
x = torch.relu(layer(x, adj))
x = self.dropout(x)
x = self.gcn_layers[-1](x, adj)
return x
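# Shape-check sketch for the GCN above (illustrative dimensions, random tensors standing
# in for the Cora features and the normalized adjacency):
#
#     model = GCN(x_dim=16, h_dim=32, out_dim=7, nb_layers=2, dropout=0.5)
#     feats = torch.randn(8, 16)          # 8 nodes, 16 input features
#     adj = torch.eye(8)                  # stand-in for a normalized adjacency matrix
#     logits = model(feats, adj)          # -> shape (8, 7)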
def to_torch(device, x, y, adj, train_mask, val_mask, test_mask):
x = torch.tensor(x, dtype=torch.float32, device=device)
y = torch.tensor(y, dtype=torch.long, device=device)
adj = torch.tensor(adj, dtype=torch.float32, device=device)
train_mask = torch.tensor(train_mask, device=device)
val_mask = torch.tensor(val_mask, device=device)
test_mask = torch.tensor(test_mask, device=device)
return x, y, adj, train_mask, val_mask, test_mask
def eval_fn(x, y):
return torch.mean((torch.argmax(x, axis=1) == y).float())
def main(args, device):
# Data loading
download_cora()
x, y, adj = load_data(args)
| train_mask, val_mask, test_mask = train_val_test_mask(y, args.nb_classes) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: 3dlg-hcvc/cage
# Path: models/utils.py
class FinalLayer(nn.Module):
def __init__(self, in_ch, out_ch=None, dropout=0.):
super().__init__()
out_ch = in_ch if out_ch is None else out_ch
self.linear = nn.Linear(in_ch, out_ch)
self.norm = AdaLayerNormTC(in_ch, 2*in_ch, dropout)
def forward(self, x, t, cond=None):
assert cond is not None
x = self.norm(x, t, cond)
x = self.linear(x)
return x
# Path: models/utils.py
class PEmbeder(nn.Module):
def __init__(self, vocab_size, d_model):
super().__init__()
self.embed = nn.Embedding(vocab_size, d_model)
self._init_embeddings()
def _init_embeddings(self):
nn.init.kaiming_normal_(self.embed.weight, mode="fan_in")
def forward(self, x, idx=None):
if idx is None:
idx = torch.arange(x.shape[1], device=x.device).long()
return x + self.embed(idx)
# Path: models/utils.py
class AAB(nn.Module):
def __init__(self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
dropout=0.0,
activation_fn: str = "geglu",
num_embeds_ada_norm: int = None,
attention_bias: bool = False,
norm_elementwise_affine: bool = True,
final_dropout: bool = False,
class_dropout_prob: float = 0.0 # for classifier-free
):
super().__init__()
self.norm1 = MyAdaLayerNormZero(dim, num_embeds_ada_norm, class_dropout_prob)
self.global_attn = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
)
self.norm2 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
self.attr_attn = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
)
self.graph_attn = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
)
self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
self.norm4 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
def forward(self, hidden_states, pad_mask, attr_mask, graph_mask, timestep, class_labels):
norm_hidden_states, gate_1, shift_mlp, scale_mlp, gate_mlp, gate_2, gate_3 = self.norm1(
hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
)
attr_out = self.attr_attn(norm_hidden_states, attention_mask=attr_mask)
attr_out = gate_1.unsqueeze(1) * attr_out
hidden_states = hidden_states + attr_out
norm_hidden_states = self.norm2(hidden_states)
global_out = self.global_attn(norm_hidden_states, attention_mask=pad_mask)
global_out = gate_2.unsqueeze(1) * global_out
hidden_states = hidden_states + global_out
norm_hidden_states = self.norm3(hidden_states)
graph_out = self.graph_attn(norm_hidden_states, attention_mask=graph_mask)
graph_out = gate_3.unsqueeze(1) * graph_out
hidden_states = hidden_states + graph_out
norm_hidden_states = self.norm4(hidden_states)
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
ff_output = self.ff(norm_hidden_states)
ff_output = gate_mlp.unsqueeze(1) * ff_output
hidden_states = ff_output + hidden_states
return hidden_states
# Path: models/denoiser.py
import torch
import models
from torch import nn
from models.utils import FinalLayer, PEmbeder, AAB
@models.register('denoiser')
class AABModel(nn.Module):
'''
Denoiser based on Attribute Attention Block (AAB)
3 sequential attentions: local -> global -> graph
'''
def __init__(self, hparams):
super(AABModel, self).__init__()
self.hparams = hparams
in_ch = hparams.in_ch
attn_dim = hparams.attn_dim
dropout = hparams.dropout
n_head = hparams.n_head
head_dim = attn_dim // n_head
num_embeds_ada_norm = 6*attn_dim
self.K = self.hparams.get('K', 32)
self.x_embedding = nn.Linear(in_ch, attn_dim)
self.pe_node = PEmbeder(self.K, attn_dim)
self.pe_attr = PEmbeder(5, attn_dim)
self.attn_layers = nn.ModuleList(
            [  # TODO: refactor this block; customize the LayerNorm eps if training with fp16
AAB(dim=attn_dim,
num_attention_heads=n_head,
attention_head_dim=head_dim,
dropout=dropout,
activation_fn="geglu",
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=False,
norm_elementwise_affine=True,
final_dropout=False,
)
for d in range(hparams.n_layers)
]
)
| self.final_layer = FinalLayer(attn_dim, in_ch, dropout=dropout) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: modelscope/llmuses
# Path: llmuses/benchmarks/benchmark.py
class Benchmark(object):
"""
Wrapper for loading datasets from ModelScope or HuggingFace.
"""
def __init__(self):
...
@staticmethod
def load(dataset_name: str,
subset: str = None,
split: str = None,
token: str = None,
hub: str = 'ModelScope',
work_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,
**kwargs):
"""
Load a dataset from ModelScope or HuggingFace.
Args:
dataset_name (str): The dataset id or path.
                If it is a dataset id, it should be in the format `organization/name` for the ModelScope or HuggingFace hub.
                If it is a dataset path, it should be a path on the local disk.
subset (str):
split:
token: sdk token for ModelScope, optional, default None
hub: `ModelScope` or `HuggingFace`
work_dir: the work directory for caching, optional
Returns:
A dict.
"""
work_dir = os.path.join(work_dir, 'benchmarks', dataset_name.replace('/', '_'))
if hub == 'ModelScope':
from modelscope.msdatasets import MsDataset
dataset = MsDataset.load(dataset_name=dataset_name, subset_name=subset, split=split, token=token,
cache_dir=work_dir, **kwargs)
dataset.dataset_name = dataset_name.split('/')[-1]
dataset.subset_name = subset
dataset.split = split
return dataset
elif hub == 'HuggingFace':
# TODO: implement this by xingjun.wxj@alibaba-inc.com
...
else:
raise ValueError(f'hub must be `ModelScope` or `HuggingFace`, but got {hub}')
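# Minimal usage sketch for Benchmark.load; the dataset id, subset and split below are
# placeholders rather than values taken from this repository.
if __name__ == "__main__":
    ds = Benchmark.load(
        dataset_name="some_org/some_dataset",  # placeholder ModelScope dataset id
        subset="default",                      # placeholder subset name
        split="test",
        hub="ModelScope",
    )
    print(ds.dataset_name, ds.subset_name, ds.split)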
# Path: llmuses/constants.py
DEFAULT_ROOT_CACHE_DIR = '~/.cache/llmuses'
# Path: llmuses/constants.py
class AnswerKeys:
ANSWER_ID = 'answer_id'
RAW_INPUT = 'raw_input'
ORIGIN_PROMPT = 'origin_prompt'
MODEL_SPEC = 'model_spec'
SUBSET_NAME = 'subset_name'
CHOICES = 'choices'
# Path: llmuses/utils/logger.py
def get_logger(log_file: Optional[str] = None,
log_level: int = logging.INFO,
file_mode: str = 'w'):
""" Get logging logger
Args:
log_file: Log filename, if specified, file handler will be added to
logger
log_level: Logging level.
file_mode: Specifies the mode to open the file, if filename is
specified (if filemode is unspecified, it defaults to 'w').
"""
logger_name = __name__.split('.')[0]
logger = logging.getLogger(logger_name)
if logger_name in init_loggers:
add_file_handler_if_needed(logger, log_file, file_mode, log_level)
return logger
for handler in logger.root.handlers:
if type(handler) is logging.StreamHandler:
handler.setLevel(logging.ERROR)
stream_handler = logging.StreamHandler()
handlers = [stream_handler]
if log_file is not None:
file_handler = logging.FileHandler(log_file, file_mode)
handlers.append(file_handler)
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(log_level)
logger.addHandler(handler)
logger.setLevel(log_level)
init_loggers[logger_name] = True
return logger
# Path: llmuses/benchmarks/data_adapter.py
from abc import ABC, abstractmethod
from typing import Any, Optional
from llmuses.benchmarks import Benchmark
from llmuses.constants import DEFAULT_ROOT_CACHE_DIR, AnswerKeys
from llmuses.utils.logger import get_logger
import random
# Copyright (c) Alibaba, Inc. and its affiliates.
logger = get_logger()
class DataAdapter(ABC):
def __init__(self,
subset_list: list,
metric_list: list,
few_shot_num: Optional[int] = 0,
train_split: Optional[str] = None,
eval_split: Optional[str] = None,
**kwargs):
"""
Args:
subset_list: list of subset names for the dataset.
metric_list: list, the metric list to evaluate the model on specific benchmark.
few_shot_num: int, number of few-shot examples. Default: 0
train_split: str, usually for few-shot examples. e.g. 'train'
eval_split: str, the target eval split name. e.g. 'test'
"""
self.subset_list = subset_list
self.metric_list = metric_list
self.few_shot_num = few_shot_num
self.train_split = train_split
self.eval_split = eval_split
def load(self,
dataset_name_or_path: str,
subset_list: list = None,
work_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,
**kwargs) -> dict:
"""
Load the dataset. Remote and local datasets are supported.
You can rewrite this method to support your own local dataset, just follow the format of the output.
Returns: {'subset_name': {'train': train_dataset, 'test': test_dataset}}
            train_dataset, test_dataset: iterable datasets in which each item is a dict.
TODO: local data path to be supported.
"""
data_dict = {}
split_list = [split for split in [self.train_split, self.eval_split] if split is not None]
if len(split_list) == 0:
logger.error(f'Got empty split list: {split_list}')
subset_list = subset_list if subset_list is not None else self.subset_list
for sub_name in subset_list:
data_dict[sub_name] = {}
# e.g. train: few-shot, test: target dataset to evaluate
for split in split_list:
| dataset = Benchmark.load(dataset_name=dataset_name_or_path, |