prompt (string, lengths 1.74k–34.3k) | ref (string, lengths 4–432)
---|---
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: thuml/iTransformer
# Path: model/Transformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
# Path: model/Informer.py
class Model(nn.Module):
def __init__(self, configs):
def long_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
# Path: model/Reformer.py
class Model(nn.Module):
def __init__(self, configs, bucket_size=4, n_hashes=4):
def long_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
# Path: model/Flowformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
# Path: model/Flashformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
# Path: model/iTransformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
_, _, N = x_enc.shape # B L N
# Path: model/iInformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
_, _, N = x_enc.shape
# Path: model/iReformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
_, _, N = x_enc.shape
# Path: model/iFlowformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
_, _, N = x_enc.shape
# Path: model/iFlashformer.py
class Model(nn.Module):
def __init__(self, configs):
def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
_, _, N = x_enc.shape
# Path: experiments/exp_basic.py
import os
import torch
from model import Transformer, Informer, Reformer, Flowformer, Flashformer, \
iTransformer, iInformer, iReformer, iFlowformer, iFlashformer
class Exp_Basic(object):
def __init__(self, args):
self.args = args
self.model_dict = {
'Transformer': Transformer,
'Informer': Informer,
'Reformer': Reformer,
'Flowformer': Flowformer,
'Flashformer': Flashformer,
'iTransformer': iTransformer,
'iInformer': iInformer,
'iReformer': iReformer,
'iFlowformer': iFlowformer,
| 'iFlashformer': iFlashformer, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kylesargent/ZeroNVS
# Path: threestudio/utils/GAN/attention.py
class LinearAttention(nn.Module):
def __init__(self, dim, heads=4, dim_head=32):
super().__init__()
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x)
q, k, v = rearrange(
qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3
)
k = k.softmax(dim=-1)
context = torch.einsum("bhdn,bhen->bhde", k, v)
out = torch.einsum("bhde,bhdn->bhen", context, q)
out = rearrange(
out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w
)
return self.to_out(out)
# Path: threestudio/utils/GAN/util.py
def instantiate_from_config(config):
if not "target" in config:
if config == "__is_first_stage__":
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
# Path: threestudio/utils/GAN/vae.py
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from threestudio.utils.GAN.attention import LinearAttention
from threestudio.utils.GAN.util import instantiate_from_config
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
def nonlinearity(x):
# swish
return x * torch.sigmoid(x)
def Normalize(in_channels, num_groups=32):
return torch.nn.BatchNorm2d(num_features=in_channels)
class Upsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
self.conv = torch.nn.Conv2d(
in_channels, in_channels, kernel_size=3, stride=1, padding=1
)
def forward(self, x):
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
if self.with_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
# no asymmetric padding in torch conv, must do it ourselves
self.conv = torch.nn.Conv2d(
in_channels, in_channels, kernel_size=3, stride=2, padding=0
)
def forward(self, x):
if self.with_conv:
pad = (0, 1, 0, 1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
else:
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
return x
class ResnetBlock(nn.Module):
def __init__(
self,
*,
in_channels,
out_channels=None,
conv_shortcut=False,
dropout,
temb_channels=512,
):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.norm1 = Normalize(in_channels)
self.conv1 = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=3, stride=1, padding=1
)
if temb_channels > 0:
self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=3, stride=1, padding=1
)
else:
self.nin_shortcut = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=1, stride=1, padding=0
)
def forward(self, x, temb):
h = x
h = self.norm1(h)
h = nonlinearity(h)
h = self.conv1(h)
if temb is not None:
h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
h = self.norm2(h)
h = nonlinearity(h)
h = self.dropout(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
x = self.conv_shortcut(x)
else:
x = self.nin_shortcut(x)
return x + h
| class LinAttnBlock(LinearAttention): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: princeton-nlp/LLM-Shearing
# Path: llmshearing/datasets/streaming_dataset.py
class TextDynamicStreamingDataset(DynamicStreamingDataset):
"""
A dataset to load data dynamically from different domains
Adapted from https://github.com/mosaicml/llm-foundry/blob/main/llmfoundry/data/text_data.py#L21
"""
def __init__(self,
local: str,
max_seq_len: int,
shuffle: bool = False,
shuffle_seed: int = 9176,
num_canonical_nodes: Optional[int] = 128,
batch_size: Optional[int] = None,
set_names: List[str] = None,
proportion: List = None,
is_uint16: bool = False):
# Build Dataset
super().__init__(local=local,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
num_canonical_nodes=num_canonical_nodes,
batch_size=batch_size,
set_names=set_names,
proportion=proportion)
# Token ids are in a uint16 format to save memory
self.is_uint16 = is_uint16
self.max_seq_len = max_seq_len
def _read_binary_tokenized_sample(self, sample):
if self.is_uint16:
a = np.frombuffer(sample['tokens'], dtype="B").view(
dtype=np.uint16).astype(np.int64)
tokens = torch.from_numpy(a[:self.max_seq_len].copy())
else:
tokens = torch.from_numpy(np.frombuffer(sample['tokens'], dtype=np.int64)[:self.max_seq_len].copy())
return tokens
def get_sample(self, idx: int) -> Dict[str, Any]:
sample = super().__getitem__(idx)
return sample
# updated
def __getitem__(self, idx: Union[int, Tuple]) -> Dict[str, Any]:
sample = super().__getitem__(idx)
token_sample = self._read_binary_tokenized_sample(sample)
return {"input_ids": token_sample, "set": sample["set"], "idx": idx}
# Path: llmshearing/datasets/streaming_dataset.py
class TextStreamingDataset(StreamingDataset):
"""
A dataset to load fixed data, a simplied version of
Adapted from https://github.com/mosaicml/llm-foundry/blob/main/llmfoundry/data/text_data.py#L21
"""
def __init__(self,
local: str,
split: str,
max_seq_len: int,
shuffle: bool = False,
shuffle_seed: int = 9176,
num_canonical_nodes: Optional[int] = 128,
batch_size: Optional[int] = None,
is_uint16: bool = False):
# Build Dataset
super().__init__(local=local,
split=split,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
num_canonical_nodes=num_canonical_nodes,
batch_size=batch_size)
# Token ids are in a uint16 format to save memory
self.is_uint16 = is_uint16
self.max_seq_len = max_seq_len
def _read_binary_tokenized_sample(self, sample):
if self.is_uint16:
a = np.frombuffer(sample['tokens'], dtype="B").view(
dtype=np.uint16).astype(np.int64)
tokens = torch.from_numpy(a[:self.max_seq_len].copy())
else:
tokens = torch.from_numpy(np.frombuffer(sample['tokens'], dtype=np.int64)[:self.max_seq_len].copy())
return tokens
def get_sample(self, idx: int) -> Dict[str, Any]:
sample = super().__getitem__(idx)
return sample
# updated
def __getitem__(self, idx: Union[int, Tuple]) -> Dict[str, Any]:
sample = super().__getitem__(idx)
token_sample = self._read_binary_tokenized_sample(sample)
return {"input_ids": token_sample, "set": sample["set"], "idx": idx}
# Path: llmshearing/datasets/load_text_dataloader.py
from collections import defaultdict
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
from omegaconf import DictConfig
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.data.data_collator import _torch_collate_batch
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from llmshearing.datasets.streaming_dataset import (
TextDynamicStreamingDataset, TextStreamingDataset)
import torch
import transformers
""" Load text dataloader for training and evaluation. """
def build_text_dataloader(cfg: DictConfig, device_batch_size: int, dynamic: bool = False,
set_names: str = None, proportion: List[float] = None) -> DataLoader:
"""Builds a text dataloader.
Args:
cfg (DictConfig): Configuration dictionary.
device_batch_size (int): Batch size for one single device.
dynamic (bool, optional): Whether to use dynamic streaming dataset to load data from each
domain dynamically. Defaults to False.
set_names (str, optional): Name of the dataset. Defaults to None.
proportion (List[float], optional): Initial proportion of each domain in the dataset. Defaults to None.
Returns:
DataLoader: A PyTorch DataLoader object.
"""
if dynamic:
dataset = TextDynamicStreamingDataset(local=cfg.dataset.local,
max_seq_len=cfg.dataset.max_seq_len,
batch_size=device_batch_size,
shuffle=cfg.dataset.get(
'shuffle', False),
shuffle_seed=cfg.dataset.get(
'shuffle_seed', 9176),
num_canonical_nodes=cfg.dataset.get(
'num_canonical_nodes', 128),
proportion=proportion,
set_names=set_names,
is_uint16=cfg.dataset.get("is_uint16", False))
else:
| dataset = TextStreamingDataset( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hugoycj/Instant-angelo
# Path: models/base.py
class BaseModel(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.rank = get_rank()
self.setup()
if self.config.get('weights', None):
self.load_state_dict(torch.load(self.config.weights))
def setup(self):
raise NotImplementedError
def update_step(self, epoch, global_step):
pass
def train(self, mode=True):
return super().train(mode=mode)
def eval(self):
return super().eval()
def regularizations(self, out):
return {}
@torch.no_grad()
def export(self, export_config):
return {}
# Path: models/utils.py
def chunk_batch(func, chunk_size, move_to_cpu, *args, **kwargs):
B = None
for arg in args:
if isinstance(arg, torch.Tensor):
B = arg.shape[0]
break
out = defaultdict(list)
out_type = None
for i in range(0, B, chunk_size):
out_chunk = func(*[arg[i:i+chunk_size] if isinstance(arg, torch.Tensor) else arg for arg in args], **kwargs)
if out_chunk is None:
continue
out_type = type(out_chunk)
if isinstance(out_chunk, torch.Tensor):
out_chunk = {0: out_chunk}
elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):
chunk_length = len(out_chunk)
out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}
elif isinstance(out_chunk, dict):
pass
else:
print(f'Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.')
exit(1)
for k, v in out_chunk.items():
v = v if torch.is_grad_enabled() else v.detach()
v = v.cpu() if move_to_cpu else v
out[k].append(v)
if out_type is None:
return
out = {k: torch.cat(v, dim=0) for k, v in out.items()}
if out_type is torch.Tensor:
return out[0]
elif out_type in [tuple, list]:
return out_type([out[i] for i in range(chunk_length)])
elif out_type is dict:
return out
# Path: systems/utils.py
def update_module_step(m, epoch, global_step):
if hasattr(m, 'update_step'):
m.update_step(epoch, global_step)
# Path: models/neus.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import models
from models.base import BaseModel
from models.utils import chunk_batch
from systems.utils import update_module_step
from nerfacc import ContractionType, OccupancyGrid, ray_marching, render_weight_from_density, render_weight_from_alpha, accumulate_along_rays
from nerfacc.intersection import ray_aabb_intersect
class VarianceNetwork(nn.Module):
def __init__(self, config):
super(VarianceNetwork, self).__init__()
self.config = config
self.init_val = self.config.init_val
self.register_parameter('variance', nn.Parameter(torch.tensor(self.config.init_val)))
self.modulate = self.config.get('modulate', False)
if self.modulate:
self.mod_start_steps = self.config.mod_start_steps
self.reach_max_steps = self.config.reach_max_steps
self.max_inv_s = self.config.max_inv_s
@property
def inv_s(self):
val = torch.exp(self.variance * 10.0)
if self.modulate and self.do_mod:
val = val.clamp_max(self.mod_val)
return val
def forward(self, x):
return torch.ones([len(x), 1], device=self.variance.device) * self.inv_s
def update_step(self, epoch, global_step):
if self.modulate:
self.do_mod = global_step > self.mod_start_steps
if not self.do_mod:
self.prev_inv_s = self.inv_s.item()
else:
self.mod_val = min((global_step / self.reach_max_steps) * (self.max_inv_s - self.prev_inv_s) + self.prev_inv_s, self.max_inv_s)
@models.register('neus')
class NeuSModel(BaseModel):
def setup(self):
self.geometry = models.make(self.config.geometry.name, self.config.geometry)
self.texture = models.make(self.config.texture.name, self.config.texture)
self.geometry.contraction_type = ContractionType.AABB
if self.config.learned_background:
self.geometry_bg = models.make(self.config.geometry_bg.name, self.config.geometry_bg)
self.texture_bg = models.make(self.config.texture_bg.name, self.config.texture_bg)
self.geometry_bg.contraction_type = ContractionType.UN_BOUNDED_SPHERE
self.near_plane_bg, self.far_plane_bg = 0.1, 1e3
self.cone_angle_bg = 10**(math.log10(self.far_plane_bg) / self.config.num_samples_per_ray_bg) - 1.
self.render_step_size_bg = 0.01
self.variance = VarianceNetwork(self.config.variance)
self.register_buffer('scene_aabb', torch.as_tensor([-self.config.radius, -self.config.radius, -self.config.radius, self.config.radius, self.config.radius, self.config.radius], dtype=torch.float32))
if self.config.grid_prune:
self.occupancy_grid = OccupancyGrid(
roi_aabb=self.scene_aabb,
resolution=128,
contraction_type=ContractionType.AABB
)
if self.config.learned_background:
self.occupancy_grid_bg = OccupancyGrid(
roi_aabb=self.scene_aabb,
resolution=256,
contraction_type=ContractionType.UN_BOUNDED_SPHERE
)
self.randomized = self.config.randomized
self.background_color = None
self.render_step_size = 1.732 * 2 * self.config.radius / self.config.num_samples_per_ray
def update_step(self, epoch, global_step):
| update_module_step(self.geometry, epoch, global_step) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: HKUDS/GraphGPT
# Path: graphgpt/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: graphgpt/constants.py
LOGDIR = "."
# Path: graphgpt/utils.py
def build_logger(logger_name, logger_filename):
def __init__(self, logger, log_level=logging.INFO):
def __getattr__(self, attr):
def write(self, buf):
def flush(self):
def disable_torch_init():
def get_gpu_memory(max_gpus=None):
def violates_moderation(text):
def clean_flant5_ckpt(ckpt_path):
def pretty_print_semaphore(semaphore):
def iter_over_async(
async_gen: AsyncGenerator, event_loop: AbstractEventLoop
) -> Generator:
async def get_next():
def detect_language(text: str) -> str:
class StreamToLogger(object):
# Path: graphgpt/serve/gradio_web_server_graph.py
import argparse
import datetime
import json
import os
import time
import gradio as gr
import requests
import hashlib
from graphgpt.conversation import (default_conversation, conv_templates,
SeparatorStyle)
from graphgpt.constants import LOGDIR
from graphgpt.utils import (build_logger, server_error_msg,
violates_moderation, moderation_msg)
logger = build_logger("gradio_web_server", "gradio_web_server.log")
headers = {"User-Agent": "GraphGPT Client"}
no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)
priority = {
"vicuna-13b": "aaaaaaa",
"koala-13b": "aaaaaab",
}
def get_conv_log_filename():
t = datetime.datetime.now()
name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
return name
def get_model_list():
ret = requests.post(args.controller_url + "/refresh_all_workers")
assert ret.status_code == 200
ret = requests.post(args.controller_url + "/list_models")
models = ret.json()["models"]
models.sort(key=lambda x: priority.get(x, x))
logger.info(f"Models: {models}")
return models
get_window_url_params = """
function() {
const params = new URLSearchParams(window.location.search);
url_params = Object.fromEntries(params);
console.log(url_params);
return url_params;
}
"""
def load_demo(url_params, request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
dropdown_update = gr.Dropdown.update(visible=True)
if "model" in url_params:
model = url_params["model"]
if model in models:
dropdown_update = gr.Dropdown.update(
value=model, visible=True)
| state = default_conversation.copy() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hkchengrex/Cutie
# Path: gui/ritm/model/metrics.py
def _compute_iou(pred_mask, gt_mask, ignore_mask=None, keep_ignore=False):
if ignore_mask is not None:
pred_mask = torch.where(ignore_mask, torch.zeros_like(pred_mask), pred_mask)
reduction_dims = misc.get_dims_with_exclusion(gt_mask.dim(), 0)
union = torch.mean((pred_mask | gt_mask).float(), dim=reduction_dims).detach().cpu().numpy()
intersection = torch.mean((pred_mask & gt_mask).float(), dim=reduction_dims).detach().cpu().numpy()
nonzero = union > 0
iou = intersection[nonzero] / union[nonzero]
if not keep_ignore:
return iou
else:
result = np.full_like(intersection, -1)
result[nonzero] = iou
return result
# Path: gui/ritm/inference/predictors/brs_losses.py
class BRSMaskLoss(torch.nn.Module):
def __init__(self, eps=1e-5):
super().__init__()
self._eps = eps
def forward(self, result, pos_mask, neg_mask):
pos_diff = (1 - result) * pos_mask
pos_target = torch.sum(pos_diff ** 2)
pos_target = pos_target / (torch.sum(pos_mask) + self._eps)
neg_diff = result * neg_mask
neg_target = torch.sum(neg_diff ** 2)
neg_target = neg_target / (torch.sum(neg_mask) + self._eps)
loss = pos_target + neg_target
with torch.no_grad():
f_max_pos = torch.max(torch.abs(pos_diff)).item()
f_max_neg = torch.max(torch.abs(neg_diff)).item()
return loss, f_max_pos, f_max_neg
# Path: gui/ritm/inference/predictors/brs_functors.py
import torch
import numpy as np
from ...model.metrics import _compute_iou
from .brs_losses import BRSMaskLoss
class BaseOptimizer:
def __init__(self, optimizer_params,
prob_thresh=0.49,
reg_weight=1e-3,
min_iou_diff=0.01,
brs_loss=BRSMaskLoss(),
with_flip=False,
flip_average=False,
**kwargs):
self.brs_loss = brs_loss
self.optimizer_params = optimizer_params
self.prob_thresh = prob_thresh
self.reg_weight = reg_weight
self.min_iou_diff = min_iou_diff
self.with_flip = with_flip
self.flip_average = flip_average
self.best_prediction = None
self._get_prediction_logits = None
self._opt_shape = None
self._best_loss = None
self._click_masks = None
self._last_mask = None
self.device = None
def init_click(self, get_prediction_logits, pos_mask, neg_mask, device, shape=None):
self.best_prediction = None
self._get_prediction_logits = get_prediction_logits
self._click_masks = (pos_mask, neg_mask)
self._opt_shape = shape
self._last_mask = None
self.device = device
def __call__(self, x):
opt_params = torch.from_numpy(x).float().to(self.device)
opt_params.requires_grad_(True)
with torch.enable_grad():
opt_vars, reg_loss = self.unpack_opt_params(opt_params)
result_before_sigmoid = self._get_prediction_logits(*opt_vars)
result = torch.sigmoid(result_before_sigmoid)
pos_mask, neg_mask = self._click_masks
if self.with_flip and self.flip_average:
result, result_flipped = torch.chunk(result, 2, dim=0)
result = 0.5 * (result + torch.flip(result_flipped, dims=[3]))
pos_mask, neg_mask = pos_mask[:result.shape[0]], neg_mask[:result.shape[0]]
loss, f_max_pos, f_max_neg = self.brs_loss(result, pos_mask, neg_mask)
loss = loss + reg_loss
f_val = loss.detach().cpu().numpy()
if self.best_prediction is None or f_val < self._best_loss:
self.best_prediction = result_before_sigmoid.detach()
self._best_loss = f_val
if f_max_pos < (1 - self.prob_thresh) and f_max_neg < self.prob_thresh:
return [f_val, np.zeros_like(x)]
current_mask = result > self.prob_thresh
if self._last_mask is not None and self.min_iou_diff > 0:
| diff_iou = _compute_iou(current_mask, self._last_mask) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DeepGraphLearning/ULTRA
# Path: ultra/tasks.py
def edge_match(edge_index, query_index):
def negative_sampling(data, batch, num_negative, strict=True):
def all_negative(data, batch):
def strict_negative_mask(data, batch):
def compute_ranking(pred, target, mask=None):
def build_relation_graph(graph):
# Path: ultra/util.py
def detect_variables(cfg_file):
def load_config(cfg_file, context=None):
def literal_eval(string):
def parse_args():
def get_root_logger(file=True):
def get_rank():
def get_world_size():
def synchronize():
def get_device(cfg):
def create_working_directory(cfg):
def build_dataset(cfg):
# Path: ultra/models.py
class Ultra(nn.Module):
def __init__(self, rel_model_cfg, entity_model_cfg):
# kept that because super Ultra sounds cool
super(Ultra, self).__init__()
self.relation_model = RelNBFNet(**rel_model_cfg)
self.entity_model = EntityNBFNet(**entity_model_cfg)
def forward(self, data, batch):
# batch shape: (bs, 1+num_negs, 3)
# relations are the same all positive and negative triples, so we can extract only one from the first triple among 1+nug_negs
query_rels = batch[:, 0, 2]
relation_representations = self.relation_model(data.relation_graph, query=query_rels)
score = self.entity_model(data, relation_representations, batch)
return score
# Path: script/pretrain.py
import os
import sys
import copy
import math
import pprint
import torch
from itertools import islice
from functools import partial
from torch import optim
from torch import nn
from torch.nn import functional as F
from torch import distributed as dist
from torch.utils import data as torch_data
from torch_geometric.data import Data
from ultra import tasks, util
from ultra.models import Ultra
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
separator = ">" * 30
line = "-" * 30
def multigraph_collator(batch, train_graphs):
num_graphs = len(train_graphs)
probs = torch.tensor([graph.edge_index.shape[1] for graph in train_graphs]).float()
probs /= probs.sum()
graph_id = torch.multinomial(probs, 1, replacement=False).item()
graph = train_graphs[graph_id]
bs = len(batch)
edge_mask = torch.randperm(graph.target_edge_index.shape[1])[:bs]
batch = torch.cat([graph.target_edge_index[:, edge_mask], graph.target_edge_type[edge_mask].unsqueeze(0)]).t()
return graph, batch
# here we assume that train_data and valid_data are tuples of datasets
def train_and_validate(cfg, model, train_data, valid_data, filtered_data=None, batch_per_epoch=None):
if cfg.train.num_epoch == 0:
return
world_size = util.get_world_size()
rank = util.get_rank()
train_triplets = torch.cat([
torch.cat([g.target_edge_index, g.target_edge_type.unsqueeze(0)]).t()
for g in train_data
])
sampler = torch_data.DistributedSampler(train_triplets, world_size, rank)
train_loader = torch_data.DataLoader(train_triplets, cfg.train.batch_size, sampler=sampler, collate_fn=partial(multigraph_collator, train_graphs=train_data))
batch_per_epoch = batch_per_epoch or len(train_loader)
cls = cfg.optimizer.pop("class")
optimizer = getattr(optim, cls)(model.parameters(), **cfg.optimizer)
num_params = sum(p.numel() for p in model.parameters())
logger.warning(line)
logger.warning(f"Number of parameters: {num_params}")
if world_size > 1:
parallel_model = nn.parallel.DistributedDataParallel(model, device_ids=[device])
else:
parallel_model = model
step = math.ceil(cfg.train.num_epoch / 10)
best_result = float("-inf")
best_epoch = -1
batch_id = 0
for i in range(0, cfg.train.num_epoch, step):
parallel_model.train()
for epoch in range(i, min(cfg.train.num_epoch, i + step)):
if util.get_rank() == 0:
logger.warning(separator)
logger.warning("Epoch %d begin" % epoch)
losses = []
sampler.set_epoch(epoch)
for batch in islice(train_loader, batch_per_epoch):
# now at each step we sample a new graph and edges from it
train_graph, batch = batch
| batch = tasks.negative_sampling(train_graph, batch, cfg.task.num_negative, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ZhengyiLuo/PerpetualHumanoidControl
# Path: uhc/khrylib/models/rnn.py
class RNN(nn.Module):
def __init__(self, input_dim, out_dim, cell_type='lstm', bi_dir=False):
super().__init__()
self.input_dim = input_dim
self.out_dim = out_dim
self.cell_type = cell_type
self.bi_dir = bi_dir
self.mode = 'batch'
rnn_cls = nn.LSTMCell if cell_type == 'lstm' else nn.GRUCell
hidden_dim = out_dim // 2 if bi_dir else out_dim
self.rnn_f = rnn_cls(self.input_dim, hidden_dim)
if bi_dir:
self.rnn_b = rnn_cls(self.input_dim, hidden_dim)
self.hx, self.cx = None, None
def set_mode(self, mode):
self.mode = mode
def initialize(self, batch_size=1, hx=None, cx=None):
if self.mode == 'step':
self.hx = zeros((batch_size, self.rnn_f.hidden_size)) if hx is None else hx
if self.cell_type == 'lstm':
self.cx = zeros((batch_size, self.rnn_f.hidden_size)) if cx is None else cx
def forward(self, x):
if self.mode == 'step':
self.hx, self.cx = batch_to(x.device, self.hx, self.cx)
if self.cell_type == 'lstm':
self.hx, self.cx = self.rnn_f(x, (self.hx, self.cx))
else:
self.hx = self.rnn_f(x, self.hx)
rnn_out = self.hx
else:
rnn_out_f = self.batch_forward(x)
if not self.bi_dir:
return rnn_out_f
rnn_out_b = self.batch_forward(x, reverse=True)
rnn_out = torch.cat((rnn_out_f, rnn_out_b), 2)
return rnn_out
def batch_forward(self, x, reverse=False):
rnn = self.rnn_b if reverse else self.rnn_f
rnn_out = []
hx = zeros((x.size(1), rnn.hidden_size), device=x.device)
if self.cell_type == 'lstm':
cx = zeros((x.size(1), rnn.hidden_size), device=x.device)
ind = reversed(range(x.size(0))) if reverse else range(x.size(0))
for t in ind:
if self.cell_type == 'lstm':
hx, cx = rnn(x[t, ...], (hx, cx))
else:
hx = rnn(x[t, ...], hx)
rnn_out.append(hx.unsqueeze(0))
if reverse:
rnn_out.reverse()
rnn_out = torch.cat(rnn_out, 0)
return rnn_out
# Path: uhc/khrylib/models/mlp.py
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dims=(128, 128), activation='tanh'):
super().__init__()
if activation == 'tanh':
self.activation = torch.tanh
elif activation == 'relu':
self.activation = torch.relu
elif activation == 'sigmoid':
self.activation = torch.sigmoid
elif activation == 'gelu':
self.activation = torch.nn.GELU()
self.out_dim = hidden_dims[-1]
self.affine_layers = nn.ModuleList()
last_dim = input_dim
for nh in hidden_dims:
self.affine_layers.append(nn.Linear(last_dim, nh))
last_dim = nh
def forward(self, x):
for affine in self.affine_layers:
x = self.activation(affine(x))
return x
# Path: uhc/khrylib/models/erd_net.py
from uhc.khrylib.utils.torch import *
from torch import nn
from uhc.khrylib.models.rnn import RNN
from uhc.khrylib.models.mlp import MLP
class ERDNet(nn.Module):
def __init__(self, state_dim):
super().__init__()
self.state_dim = state_dim
| self.encoder_mlp = MLP(state_dim, (500,), 'relu') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: laike9m/Python-Type-Challenges
# Path: views/challenge.py
ROOT_DIR = Path(__file__).parent.parent
BASIC = "basic"
INTERMEDIATE = "intermediate"
ADVANCED = "advanced"
EXTREME = "extreme"
CODE_SPLITTER: ClassVar[str] = "\n## End of your code ##\n"
EXPECT_ERROR_COMMENT = "expect-type-error"
PYRIGHT_MESSAGE_REGEX = r"^(?:.+?):(\d+):[\s\-\d]+(error:.+)$"
class Level(StrEnum):
class ChallengeKey:
class Challenge:
class TypeCheckResult:
class ChallengeManager:
def is_valid_level(cls, level: str):
def from_str(cls, key: str):
def __post_init__(self):
def parse_code(self):
def __init__(self, root_dir: Optional[Path] = None):
def has_challenge(self, key: ChallengeKey) -> bool:
def get_challenge(self, key: ChallengeKey) -> Challenge:
def challenge_count(self) -> int:
def run_challenge(self, key: ChallengeKey, user_code: str) -> TypeCheckResult:
def get_random_challenge(self) -> dict[str, str]:
def _load_challenges(root_dir: Path) -> dict[ChallengeKey, Challenge]:
def _get_challenges_groupby_level(self) -> dict[Level, list[ChallengeName]]:
def _type_check_with_pyright(
cls, user_code: str, test_code: str
) -> TypeCheckResult:
# Path: views/sitemap.py
# Path: views/utils/text.py
def render_hints(hints: str) -> str:
"""Render the hints messages to HTML format."""
return markdown.markdown(hints)
# Path: views/views.py
import ast
import platform
from functools import wraps
from flask import (
abort,
Blueprint,
jsonify,
redirect,
render_template,
request,
)
from flask_htmx import HTMX
from .challenge import ChallengeKey, Level, challenge_manager
from .sitemap import sitemapper
from .utils.text import render_hints
app_views = Blueprint("app_views", __name__)
htmx = HTMX(app_views)
def validate_challenge(view_func):
@wraps(view_func)
def wrapper(level, name, *args, **kwargs):
if Level.is_valid_level(level) and challenge_manager.has_challenge(
ChallengeKey(Level(level), name)
):
return view_func(level, name, *args, **kwargs) # valid challenge
abort(404)
return wrapper
@sitemapper.include(changefreq="daily", priority=1.0)
@app_views.route("/")
def index():
return render_template(
"index.html",
challenges_groupby_level=challenge_manager.challenges_groupby_level,
)
@sitemapper.include(
changefreq="daily",
priority=0.5,
# https://github.com/h-janes/flask-sitemapper/wiki/Usage#dynamic-routes
url_variables={
"level": [c.level for c in challenge_manager.challenges.keys()],
"name": [c.name for c in challenge_manager.challenges.keys()],
},
)
@app_views.route("/<level>/<name>", methods=["GET"])
@validate_challenge
def get_challenge(level: str, name: str):
challenge = challenge_manager.get_challenge(ChallengeKey(Level(level), name))
params = {
"name": name,
"level": challenge.level,
"challenges_groupby_level": challenge_manager.challenges_groupby_level,
"code_under_test": challenge.user_code,
"test_code": challenge.test_code,
| "hints_for_display": render_hints(challenge.hints) if challenge.hints else None, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: uni-medical/SAM-Med3D
# Path: segment_anything/modeling/common.py
class LayerNorm2d(nn.Module):
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
y = self.weight[:, None, None] * x
# y = torch.mul(self.weight[:, None, None], x)
x = y + self.bias[:, None, None]
return x
# Path: segment_anything/modeling/common.py
class MLPBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
mlp_dim: int,
act: Type[nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
# Path: segment_anything/modeling/image_encoder.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
| LayerNorm2d(out_chans), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: VikParuchuri/libgen_to_txt
# Path: libgen_to_txt/settings.py
class Settings(BaseSettings):
class Config:
BASE_STORAGE_FOLDER: str = "libgen" # temp storage for downloaded chunks
BASE_PROCESSED_FOLDER: str = "processed" # After a chunk is processed, an empty file is created here
BASE_TXT_FOLDER: str = "txt" # Where the final text is stored
BASE_METADATA_FOLDER: str = "metadata" # Where to store metadata for processing
LIBGEN_DB_NAME: str = "libgen"
LIBGEN_DB_USER: str = "libgen"
LIBGEN_DB_PASS: str = "password"
CONVERSION_WORKERS: int = 18 # Number of workers to use to convert pdfs for each libgen chunk
DOWNLOAD_WORKERS: int = 8 # Number of download workers (bandwidth-bound)
MAX_TIME_TO_WAIT: int = 60 * 60 * 6 # 6 hours to wait for a download to finish
RCLONE_ADAPTER_NAME: str = "putio"
TEXT_FLAGS: int = pymupdf.TEXTFLAGS_TEXT & ~pymupdf.TEXT_PRESERVE_LIGATURES
CONVERSION_METHOD: str = "naive" # Either naive or marker. Naive is faster, but marker is more accurate.
GPU_COUNT: int = 0 # Number of GPUs to use for marker. 0 means to use CPU only
MARKER_FOLDER: str = "../marker"
MARKER_GPU_TIMEOUT: int = 60 * 60 * 8 # Time to wait for marker gpu to finish
MARKER_CPU_TIMEOUT: int = 60 * 60 * 24 # Time to wait for marker to finish
MARKER_SUPPORTED_LANGUAGES: List = ["English", "Spanish", "Portuguese", "French", "German", "Russian"]
MARKER_SUPPORTED_EXTENSIONS: List = ["pdf", "epub", "mobi", "xps", "fb2"]
MARKER_MIN_LENGTH: int = 10000 # Min amount of text to extract from file naively before using marker
MARKER_DEBUG_DATA_FOLDER: Optional[str] = None # Folder to store debug data in
POETRY_DIR: str = "~/.local/bin" # Poetry directory, used to activate marker venv
PUTIO_TOKEN: str = ""
PUTIO_FOLDER: str = "libgen"
# Path: libgen_to_txt/metadata.py
def query_metadata(fmd5):
connection = pymysql.connect(host='localhost',
user=settings.LIBGEN_DB_USER,
password=settings.LIBGEN_DB_PASS,
database=settings.LIBGEN_DB_NAME,
cursorclass=pymysql.cursors.DictCursor)
with connection:
with connection.cursor() as cursor:
# Read a single record
sql = "SELECT ue.ID, ue.Title, ue.Author, ue.Year, ue.Language, ue.Publisher, ue.Topic, ue.Extension, ue.Cleaned, ue.Scanned, ue.Pages, de.descr, de.toc from updated_edited ue left outer join description_edited de on de.md5 = ue.MD5 where ue.MD5=%s order by ue.TimeLastModified desc limit 1;"
cursor.execute(sql, (fmd5,))
metadata = cursor.fetchone()
return metadata
# Path: libgen_to_txt/marker/convert.py
import subprocess
import os
import psutil
import json
from libgen_to_txt.settings import settings
from libgen_to_txt.metadata import query_metadata
def filter_invalid(folder_name):
files = os.listdir(folder_name)
all_metadata = {}
for fname in files:
if fname.startswith("."):
continue
fpath = os.path.join(folder_name, fname)
metadata = query_metadata(fname)
if not metadata:
os.unlink(fpath)
continue
| if metadata["Language"].strip() not in settings.MARKER_SUPPORTED_LANGUAGES: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: senran101604/sagemode
# Path: accessories.py
class Notify:
"A helper class for notifications of Sagemode process"
@staticmethod
def start(username: str, number_of_sites) -> str:
start(ascii_art, delay=0.1)
if username or sites is not None:
return f"[yellow][[bright_red]*[yellow][yellow]] [bright_blue]Searching {number_of_sites} sites for target: [bright_yellow]{username}"
# notify the user how many sites the username has been found
@staticmethod
def positive_res(username: str, count) -> str:
return f"\n[yellow][[bright_red]+[yellow]][bright_green] Found [bright_red]{username} [bright_green]on [bright_magenta]{count}[bright_green] sites"
# notify the user where the result is stored
@staticmethod
def stored_result(result_file: str) -> str:
return f"[bright_green][[yellow]@[bright_green]] [orange3]Results stored in: [bright_green]{result_file}\n"
@staticmethod
def not_found(site: str, status_code="") -> str:
if status_code:
return f"[black][[red]-[black]] [blue]{site}: [yellow]Not Found! {status_code}"
return f"[black][[red]-[black]] [blue]{site}: [yellow]Not Found!"
@staticmethod
def found(site: str, url: str) -> str:
return f"[red][[green]+[red]] [green]{site}: [blue]{url}"
@staticmethod
def update(local_version: str, remote_version: str) -> str:
return (
"[red][[bright_red]![red]] [yellow]Update Available!\n[/yellow]"
+ f"[red][[yellow]![red]] [bright_yellow]You are running Version: [bright_green]{local_version}\n"
+ f"[red][[/red][yellow]![red]][bright_yellow] New Version Available: [bright_green]{remote_version}"
)
@staticmethod
def update_error(error: str) -> str:
return f"[bright_red][[bright_red]![bright_red]] [bright_yellow]A problem occured while checking for an update: [bright_red]{error}"
@staticmethod
def version(version: str) -> str:
return f"[bright_yellow]Sagemode [bright_red]{version}"
def exception(site, error):
return f"[black][[red]![black]] [blue]{site}: [bright_red]{error}..."
# Path: sites.py
# Path: sagemode.py
import os
import re
import datetime
import subprocess
import threading
import random
import requests
from argparse import ArgumentParser
from rich.console import Console
from bs4 import BeautifulSoup
from accessories import Notify
from sites import sites, soft404_indicators, user_agents
#! /usr/bin/env python3
"""
Sagemode: Track and Unveil Online identities across social media platforms.
"""
__version__ = "1.1.3"
class Sagemode:
def __init__(self, username: str, found_only=False):
self.console = Console()
self.notify = Notify
self.positive_count = 0
self.username = username
self.result_file = os.path.join("data", f"{self.username}.txt")
self.found_only = found_only
# this function checks if the url not a false positive result, return false
def is_soft404(self, html_response: str) -> bool:
# this is for checking the title bar of the page
soup = BeautifulSoup(html_response, "html.parser")
page_title = soup.title.string.strip() if soup.title else ""
# I know this is kinda messy solution but it currently solve.. reduce the problem
# in soft404 responses (false positives)
for error_indicator in soft404_indicators:
if (
# check if the error indicator is in the html string response
error_indicator.lower() in html_response.lower()
# check for the title bar of the page if there are anyi error_indicator
or error_indicator.lower() in page_title.lower()
# Specific check sites, since positive result will have the username in the title bar.
or page_title.lower() == "instagram"
# patreon's removed user
or page_title.lower() == "patreon logo"
or "sign in" in page_title.lower()
):
return True
return False
def check_site(self, site: str, url: str, headers):
url = url.format(self.username)
# we need headers to avoid being blocked by requesting the website 403 error
try:
with requests.Session() as session:
response = session.get(url, headers=headers)
# Raises an HTTPError for bad responses
# further check to reduce false positive results
if (
response.status_code == 200
and self.username.lower() in response.text.lower()
and not self.is_soft404(response.text)
):
# to prevent multiple threads from accessing/modifying the positive
# counts simultaneously and prevent race conditions.
with threading.Lock():
self.positive_count += 1
self.console.print(self.notify.found(site, url))
with open(self.result_file, "a") as f:
f.write(f"{url}\n")
# the site reurned 404 (user not found)
else:
if not self.found_only:
self.console.print(self.notify.not_found(site))
except Exception as e:
self.notify.exception(site, e)
def start(self):
"""
Start the search.
"""
self.console.print(self.notify.start(self.username, len(sites)))
current_datetime = datetime.datetime.now()
date = current_datetime.strftime("%m/%d/%Y")
time = current_datetime.strftime("%I:%M %p")
| headers = {"User-Agent": random.choice(user_agents)} |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: NVIDIA/GenerativeAIExamples
# Path: RetrievalAugmentedGeneration/common/utils.py
DEFAULT_MAX_CONTEXT = 1500
DEFAULT_NUM_TOKENS = 150
TEXT_SPLITTER_EMBEDDING_MODEL = "intfloat/e5-large-v2"
class LimitRetrievedNodesLength(BaseNodePostprocessor):
def _postprocess_nodes(
self, nodes: List["NodeWithScore"] = [], query_bundle: Optional["QueryBundle"] = None
) -> List["NodeWithScore"]:
def set_service_context() -> None:
def get_config() -> "ConfigWizard":
def get_vector_index() -> VectorStoreIndex:
def get_doc_retriever(num_nodes: int = 4) -> "BaseRetriever":
def get_llm() -> LangChainLLM:
def get_embedding_model() -> LangchainEmbedding:
def is_base64_encoded(s: str) -> bool:
def get_text_splitter() -> SentenceTransformersTokenTextSplitter:
# Path: RetrievalAugmentedGeneration/examples/developer_rag/chains.py
def llm_chain(
context: str, question: str, num_tokens: int
) -> Generator[str, None, None]:
def rag_chain(prompt: str, num_tokens: int) -> Generator[str, None, None]:
def ingest_docs(data_dir: str, filename: str) -> None:
# Path: RetrievalAugmentedGeneration/common/server.py
import base64
import os
import shutil
import logging
from pathlib import Path
from typing import Any, Dict, List
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel, Field
from pymilvus.exceptions import MilvusException, MilvusUnavailableException
from RetrievalAugmentedGeneration.common import utils
from RetrievalAugmentedGeneration.examples.developer_rag import chains
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of the Llama Index chain server."""
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# create the FastAPI server
app = FastAPI()
# prestage the embedding model
_ = utils.get_embedding_model()
# set the global service context for Llama Index
utils.set_service_context()
class Prompt(BaseModel):
"""Definition of the Prompt API data type."""
question: str = Field(description="The input query/prompt to the pipeline.")
context: str = Field(description="Additional context for the question (optional)")
use_knowledge_base: bool = Field(description="Whether to use a knowledge base", default=True)
num_tokens: int = Field(description="The maximum number of tokens in the response.", default=50)
class DocumentSearch(BaseModel):
"""Definition of the DocumentSearch API data type."""
content: str = Field(description="The content or keywords to search for within documents.")
num_docs: int = Field(description="The maximum number of documents to return in the response.", default=4)
@app.post("/uploadDocument")
async def upload_document(file: UploadFile = File(...)) -> JSONResponse:
"""Upload a document to the vector store."""
if not file.filename:
return JSONResponse(content={"message": "No files provided"}, status_code=200)
try:
upload_folder = "uploaded_files"
upload_file = os.path.basename(file.filename)
if not upload_file:
raise RuntimeError("Error parsing uploaded filename.")
file_path = os.path.join(upload_folder, upload_file)
uploads_dir = Path(upload_folder)
uploads_dir.mkdir(parents=True, exist_ok=True)
with open(file_path, "wb") as f:
shutil.copyfileobj(file.file, f)
| chains.ingest_docs(file_path, upload_file) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Hackl0us/apple-spyder
# Path: classes/database.py
class DatabaseUtil:
def __init__(self):
import sqlite3
self.conn = sqlite3.connect('res/apple-spyder.db')
def db_select(self, sql):
try:
c = self.conn.execute(sql)
return c.fetchall()
except ValueError as err:
print(err)
def db_operate(self, *sql):
try:
self.conn.execute(*sql)
self.conn.commit()
except ValueError as err:
print(err)
def close(self):
self.conn.close()
# Path: classes/datetime.py
def covert_to_local_timezone(datetime):
return parser.parse(datetime).astimezone(tz=None)
# Path: classes/datetime.py
def is_a_previous_time(last_update_time, current_time):
if parser.parse(last_update_time) < parser.parse(current_time):
return True
else:
return False
# Path: classes/telegram.py
class Telegram:
def __init__(self):
config = _get_bot_config()
self.enable = config['enable']
self.bot_token = config['bot-token']
self.chat_id = config['chat-id']
def send_message(self, message, chat_id=None, parse_in_markdown=False):
if not self.enable:
logging.warning("Telegram posting feature is DISABLED.")
return
if chat_id is None:
chat_id = self.chat_id
send_message_url = f'https://api.telegram.org/bot{self.bot_token}/sendMessage?chat_id={chat_id}&text={message}'
if parse_in_markdown:
send_message_url += '&parse_mode=markdown'
requests.get(send_message_url)
# Path: classes/weibo.py
class Weibo:
def __init__(self):
config = _get_weibo_config()
self.enable = config['enable']
self.access_token = config['access-token']
self.redirect_uri = config['redirect-uri']
self.rip = config['rip']
def post_weibo(self, message):
if not self.enable:
logging.warning("Weibo posting feature is DISABLED.")
return
url = "https://api.weibo.com/2/statuses/share.json"
params = {"access_token": self.access_token, "status": str(message) + self.redirect_uri, "rip": self.rip}
res = requests.post(url, data=params)
print(res.text)
# Path: airpods_update_detection.py
import logging
import plistlib
import urllib.request
from classes.database import DatabaseUtil
from classes.datetime import covert_to_local_timezone
from classes.datetime import is_a_previous_time
from classes.telegram import Telegram
from classes.weibo import Weibo
def main():
ota_update_url = "https://mesu.apple.com/assets/com_apple_MobileAsset_UARP_A2618/com_apple_MobileAsset_UARP_A2618.xml"
with urllib.request.urlopen(ota_update_url) as response:
firmware_release_date = response.headers['last-modified']
plist_content = plistlib.loads(response.read())
# Get last OTA update time from db
| db = DatabaseUtil() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lm-sys/llm-decontaminator
# Path: detect_instruct.py
def datatype_to_instruct(data_type):
if data_type == "code":
return code_instruct
elif data_type == "number_substitution":
return strong_math_instruct
elif data_type == "math":
return math_instruct
elif data_type == "knowledge":
return knowledge_instruct
else:
raise Exception("Invalid data type: {}".format(data_type))
# Path: llm_detect.py
def llm_detect(model, database, output_path, instruct, max_workers=32):
results = []
futures = []
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
for i, pairs in enumerate(database):
test_case = pairs["test"]
case_results = []
for train_case in pairs["train"]:
future = executor.submit(detect_contamination, model, test_case, train_case, instruct)
case_results.append(future)
futures.append(case_results)
for case_results in futures:
results.append([future.result() for future in case_results])
for i in range(len(database)):
database[i]["results"] = results[i]
with open(output_path, "w") as fout:
for each in database:
fout.write(json.dumps(each) + "\n")
return database
# Path: llm_detect.py
def check_openai_key():
if not "OPENAI_API_KEY" in os.environ:
raise Exception("Please set your OPENAI_API_KEY environment variable.")
# Path: vector_db.py
def build_database(model, train_path, test_path, output_path, top_k=1, batch_size=32, device=None):
train_cases = read_dataset(train_path)
test_cases = read_dataset(test_path)
train_embs = bert_encode(model, train_cases, batch_size=batch_size, device=device)
test_embs = bert_encode(model, test_cases, batch_size=batch_size, device=device)
top_k_indices = top_k_similarity(train_embs, test_embs, top_k)
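# Pair each test case with its top-k most similar training cases so they can later be checked for contamination.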
db = []
for i, test_case in enumerate(test_cases):
top_k_cases = [train_cases[index] for index in top_k_indices[i]]
db.append({"test": test_case, "train": top_k_cases})
with open(output_path, "w") as f:
for each in db:
f.write(json.dumps(each) + "\n")
return db
# Path: show_samples.py
def show(database, mode="all"):
for each in database:
test_case = each["test"]
for i, train_case in enumerate(each["train"]):
if each["results"][i]:
print(f"Test case:\n{test_case}\n")
print(f"Train case:\n{train_case}\n")
rephrase_num = sum([1 if True in each["results"] else 0 for each in database])
print(f"Rephrase num: {rephrase_num}")
# Path: main.py
import argparse
from sentence_transformers import SentenceTransformer
from detect_instruct import datatype_to_instruct
from llm_detect import llm_detect, check_openai_key
from vector_db import build_database
from show_samples import show
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Build database of top-k similar cases')
parser.add_argument('--train_path', type=str, required=True, help='Path to train cases')
parser.add_argument('--test_path', type=str, required=True, help='Path to test cases')
parser.add_argument('--output_path', type=str, required=True, help='Path to output database')
parser.add_argument('--bert-model', type=str, default='multi-qa-MiniLM-L6-cos-v1', help='Path to sentence transformer model')
parser.add_argument('--top_k', type=int, default=1, help='Number of top-k similar cases to retrieve')
parser.add_argument('--batch_size', type=int, default=32, help='Batch size for encoding')
parser.add_argument('--device', type=str, default=None, help='Device to use for encoding (e.g. "cuda:0")')
parser.add_argument("--model", type=str, default="gpt-4", help="The name of the OpenAI model to use")
parser.add_argument("--data-type", type=str, default="code", help="The name of the instruction function to use")
parser.add_argument("--max-workers", type=int, default=2, help="The maximum number of worker threads to use")
args = parser.parse_args()
check_openai_key()
bert_model = SentenceTransformer(args.bert_model)
database = build_database(bert_model, args.train_path, args.test_path, args.output_path, args.top_k, args.batch_size, args.device)
| instruct = datatype_to_instruct(args.data_type) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: MolecularAI/REINVENT4
# Path: reinvent_plugins/components/component_results.py
class ComponentResults:
"""Container for the scores, uncertainties and meta data
At the minimum the scores must be provided. The order of the score array
must be the same as the order of SMILES passed to each component. Failure
of computation of score must be indicated by NaN. Do not use zero for this!
scores_properties can be used to pass on meta data on the scores
uncertainty_type is currently assumed to be the same for all values
failure_properties can be used to provide details on the failure of a component
meta_data is a general facility to pass on meta data
"""
scores: List[np.ndarray]
scores_properties: Optional[List[Dict]] = None
uncertainty: Optional[List[np.ndarray]] = None
uncertainty_type: Optional[str] = None
uncertainty_properties: Optional[List[Dict]] = None
failures_properties: Optional[List[Dict]] = None
metadata: Optional[Dict] = None
# Path: reinvent_plugins/components/run_program.py
def run_command(command: List[str], env: dict = None, input=None) -> sp.CompletedProcess:
"""Run an external command in a subprocess.
:params command: array of command line arguments
:returns: output object from the subprocess
"""
args = dict(capture_output=True, text=True, check=True, shell=False)
if env:
args.update({"env": env})
if input:
args.update({"input": input})
try:
result = sp.run(command, **args)
except sp.CalledProcessError as error:
ret = error.returncode
out = error.stdout
err = error.stderr
raise RuntimeError(
f"{__name__}: {' '.join(command)} has failed with exit "
f"code {ret}: stdout={out}, stderr={err}"
)
return result
# Path: reinvent_plugins/components/add_tag.py
def add_tag(label: str, text: str = "True"):
"""A simple decorator to tag a class"""
def wrapper(cls):
setattr(cls, label, text)
return cls
return wrapper
# Path: reinvent_plugins/normalize.py
def normalize_smiles(func: Callable):
def wrapper(self, smilies: List[str]):
normalizer = getattr(normalizers, self.smiles_type)
cleaned_smilies = normalizer.normalize(smilies)
return func(self, cleaned_smilies)
return wrapper
# Path: reinvent_plugins/components/comp_mmp.py
import logging
import shlex
import numpy as np
import pandas as pd
from io import StringIO
from dataclasses import dataclass, field
from typing import List
from rdkit import Chem
from .component_results import ComponentResults
from .run_program import run_command
from .add_tag import add_tag
from ..normalize import normalize_smiles
"""Matched molecular pairs"""
from __future__ import annotations
__all__ = ["MMP"]
logger = logging.getLogger('reinvent')
@add_tag("__parameters")
@dataclass
class Parameters:
"""Parameters for the scoring component
Note that all parameters are always lists because components can have
multiple endpoints and so all the parameters from each endpoint is
collected into a list. This is also true in cases where there is only one
endpoint.
"""
reference_smiles: List[List[str]]
num_of_cuts: List[int] = field(default_factory=lambda: [1])
max_variable_heavies: List[int] = field(default_factory=lambda: [40])
max_variable_ratio: List[float] = field(default_factory=lambda: [0.33])
FRAG_CMD = "mmpdb --quiet fragment --num-cuts {ncuts}"
IDX_CMD = (
"mmpdb --quiet index --out csv --symmetric --max-variable-heavies {heavy} "
"--max-variable-ratio {ratio}"
)
@add_tag("__component")
class MMP:
def __init__(self, params: Parameters):
self.ref_smilies = params.reference_smiles
self.num_of_cuts = params.num_of_cuts
self.max_variable_heavies = params.max_variable_heavies
self.max_variable_ratio = params.max_variable_ratio
# needed in the normalize_smiles decorator
# FIXME: really needs to be configurable for each model separately
self.smiles_type = 'rdkit_smiles'
@normalize_smiles
def __call__(self, smilies: List[str]) -> np.array:
scores = []
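# Canonicalize reference and generated SMILES with RDKit (stereochemistry dropped via isomericSmiles=False) so mmpdb sees consistent strings; SMILES that fail to parse are dropped.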
self.ref_smilies = [[Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=False)
for smi in self.ref_smilies[0] if Chem.MolFromSmiles(smi)]]
smilies = [Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=False) for smi in smilies if
Chem.MolFromSmiles(smi)]
for ref_smilies, ncuts, max_heavy, max_ratio in zip(
self.ref_smilies, self.num_of_cuts, self.max_variable_heavies, self.max_variable_ratio
):
smiles_csv = format_smilies(smilies, ref_smilies)
frag_cmd = FRAG_CMD.format(ncuts=ncuts)
| result1 = run_command(shlex.split(frag_cmd), input=smiles_csv) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lion-agi/lionagi
# Path: lionagi/utils/sys_util.py
def create_copy(input: Any, n: int) -> Any:
"""
Creates a deep copy of the input object a specified number of times.
This function makes deep copies of the provided input. If the number of copies ('n')
is greater than 1, a list of deep copies is returned. For a single copy, it returns
the copy directly.
Parameters:
input (Any): The object to be copied.
n (int): The number of deep copies to create.
Raises:
ValueError: If 'n' is not a positive integer.
Returns:
Any: A deep copy of 'input' or a list of deep copies if 'n' > 1.
Example:
>>> sample_dict = {'key': 'value'}
>>> create_copy(sample_dict, 2)
[{'key': 'value'}, {'key': 'value'}]
"""
if not isinstance(n, int) or n < 1:
raise ValueError(f"'n' must be a positive integer: {n}")
return copy.deepcopy(input) if n == 1 else [copy.deepcopy(input) for _ in range(n)]
# Path: lionagi/utils/sys_util.py
def create_id(n=32) -> str:
"""
Generates a unique ID based on the current time and random bytes.
This function combines the current time in ISO 8601 format with random bytes
to create a unique identifier. The combination is hashed using SHA-256 and the
first `n` characters of the hexadecimal digest are returned.
Returns:
str: A unique identifier of length `n` (32 by default).
Example:
>>> create_id() # Doctest: +ELLIPSIS
'...'
"""
current_time = datetime.now().isoformat().encode('utf-8')
random_bytes = os.urandom(2048)
return hashlib.sha256(current_time + random_bytes).hexdigest()[:n]
# Path: lionagi/utils/sys_util.py
def change_dict_key(dict_, old_key, new_key):
dict_[new_key] = dict_.pop(old_key)
# Path: lionagi/utils/sys_util.py
def is_schema(dict_: Dict, schema: Dict):
for key, expected_type in schema.items():
if not isinstance(dict_[key], expected_type):
return False
return True
# Path: lionagi/utils/encrypt_util.py
def encrypt(data: str, key: str) -> str:
"""Encrypts data using the provided key."""
fernet = Fernet(key.encode())
return fernet.encrypt(data.encode()).decode()
# Path: lionagi/utils/encrypt_util.py
def decrypt(data: str, key: str) -> str:
"""Decrypts data using the provided key."""
fernet = Fernet(key.encode())
return fernet.decrypt(data.encode()).decode()
# Path: lionagi/utils/convert_util.py
def dict_to_xml(data: Dict[str, Any], root_tag: str = 'node') -> str:
"""
Helper method to convert a dictionary to an XML string.
Parameters:
data (Dict[str, Any]): The dictionary to convert to XML.
root_tag (str): The root tag name for the XML.
Returns:
str: An XML string representation of the dictionary.
"""
root = ET.Element(root_tag)
_build_xml(root, data)
return ET.tostring(root, encoding='unicode')
# Path: lionagi/schema/base_node.py
import json
import xml.etree.ElementTree as ET
from typing import Any, Dict, Optional, TypeVar, Type, List, Callable, Union
from pydantic import BaseModel, Field, AliasChoices
from lionagi.utils import (
create_id, is_schema, change_dict_key, create_copy,
encrypt, decrypt, dict_to_xml
)
# uses utils
T = TypeVar('T', bound='BaseNode')
class BaseNode(BaseModel):
"""
A foundational building block for representing a node in a graph-like structure.
This class includes functionalities for serialization, metadata manipulation,
content encryption/decryption, and utility methods.
Attributes:
id_ (str): Unique identifier for the node, aliased as 'node_id'.
metadata (Dict[str, Any]): Dictionary of metadata related to the node.
label (Optional[str]): Label categorizing or identifying the node.
related_nodes (List[str]): Identifiers for nodes related to this node.
content (Union[str, Dict[str, Any], None, Any]): Content of the node.
"""
| id_: str = Field(default_factory=lambda: str(create_id()), alias="node_id") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: stanford-oval/WikiChat
# Path: ColBERT/colbert/search/strided_tensor_core.py
class StridedTensorCore:
# # @profile
def __init__(self, packed_tensor, lengths, dim=None, use_gpu=True):
self.dim = dim
self.tensor = packed_tensor
self.inner_dims = self.tensor.size()[1:]
self.use_gpu = use_gpu
self.lengths = lengths.long() if torch.is_tensor(lengths) else torch.LongTensor(lengths)
self.strides = _select_strides(self.lengths, [.5, .75, .9, .95]) + [self.lengths.max().item()]
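# The stride candidates always include the maximum length, so every row fits into at least one of the precomputed views.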
self.max_stride = self.strides[-1]
zero = torch.zeros(1, dtype=torch.long, device=self.lengths.device)
self.offsets = torch.cat((zero, torch.cumsum(self.lengths, dim=0)))
if self.offsets[-2] + self.max_stride > self.tensor.size(0):
# if self.tensor.size(0) > 10_000_000:
# print("#> WARNING: StridedTensor has to add padding, internally, to a large tensor.")
# print("#> WARNING: Consider doing this padding in advance to save memory!")
padding = torch.zeros(self.max_stride, *self.inner_dims, dtype=self.tensor.dtype, device=self.tensor.device)
self.tensor = torch.cat((self.tensor, padding))
self.views = {stride: _create_view(self.tensor, stride, self.inner_dims) for stride in self.strides}
@classmethod
def from_packed_tensor(cls, tensor, lengths):
return cls(tensor, lengths)
@classmethod
def from_padded_tensor(cls, tensor, mask):
pass
@classmethod
def from_nested_list(cls, lst):
flat_lst = flatten(lst)
tensor = torch.Tensor(flat_lst)
lengths = [len(sublst) for sublst in lst]
return cls(tensor, lengths, dim=0)
@classmethod
def from_tensors_list(cls, tensors):
# torch.cat(tensors)
# lengths.
# cls(tensor, lengths)
raise NotImplementedError()
def as_packed_tensor(self, return_offsets=False):
unpadded_packed_tensor = self.tensor # [:self.offsets[-1]]
return_vals = [unpadded_packed_tensor, self.lengths]
if return_offsets:
return_vals.append(self.offsets)
return tuple(return_vals)
# # @profile
def as_padded_tensor(self):
if self.use_gpu:
view = _create_view(self.tensor.cuda(), self.max_stride, self.inner_dims)[self.offsets[:-1]]
mask = _create_mask(self.lengths.cuda(), self.max_stride, like=view, use_gpu=self.use_gpu)
else:
#import pdb
#pdb.set_trace()
view = _create_view(self.tensor, self.max_stride, self.inner_dims)
view = view[self.offsets[:-1]]
mask = _create_mask(self.lengths, self.max_stride, like=view, use_gpu=self.use_gpu)
return view, mask
def as_tensors_list(self):
raise NotImplementedError()
# Path: ColBERT/colbert/search/strided_tensor_core.py
def _create_mask(lengths, stride, like=None, use_gpu=True):
if use_gpu:
mask = torch.arange(stride).cuda() + 1
mask = mask.unsqueeze(0) <= lengths.cuda().unsqueeze(-1)
else:
mask = torch.arange(stride) + 1
mask = mask.unsqueeze(0) <= lengths.unsqueeze(-1)
if like is not None:
for _ in range(like.dim() - mask.dim()):
mask = mask.unsqueeze(-1)
return mask
# Path: ColBERT/colbert/search/strided_tensor_core.py
def _create_view(tensor, stride, inner_dims):
outdim = tensor.size(0) - stride + 1
size = (outdim, stride, *inner_dims)
inner_dim_prod = int(np.prod(inner_dims))
multidim_stride = [inner_dim_prod, inner_dim_prod] + [1] * len(inner_dims)
return torch.as_strided(tensor, size=size, stride=multidim_stride)
# Path: ColBERT/colbert/search/strided_tensor.py
from struct import pack
from torch._C import device
from colbert.utils.utils import flatten, print_message
from .strided_tensor_core import StridedTensorCore, _create_mask, _create_view
from torch.utils.cpp_extension import load
import torch
import os
import pathlib
import os
import pickle
import time
class StridedTensor(StridedTensorCore):
def __init__(self, packed_tensor, lengths, dim=None, use_gpu=True):
super().__init__(packed_tensor, lengths, dim=dim, use_gpu=use_gpu)
StridedTensor.try_load_torch_extensions(use_gpu)
@classmethod
def try_load_torch_extensions(cls, use_gpu):
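# The C++ segmented-lookup extension is compiled lazily and only needed for CPU lookups; on GPU, or once it is already loaded, this is a no-op.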
if hasattr(cls, "loaded_extensions") or use_gpu:
return
print_message(f"Loading segmented_lookup_cpp extension (set COLBERT_LOAD_TORCH_EXTENSION_VERBOSE=True for more info)...")
segmented_lookup_cpp = load(
name="segmented_lookup_cpp",
sources=[
os.path.join(
pathlib.Path(__file__).parent.resolve(), "segmented_lookup.cpp"
),
],
extra_cflags=["-O3"],
verbose=os.getenv("COLBERT_LOAD_TORCH_EXTENSION_VERBOSE", "False") == "True",
)
cls.segmented_lookup = segmented_lookup_cpp.segmented_lookup_cpp
cls.loaded_extensions = True
@classmethod
def pad_packed(cls, packed_tensor, lengths):
assert False, "This seems to be incorrect but I can't see why. Is it the inner_dims in the views?"
packed_tensor, lengths = packed_tensor.cuda().contiguous(), lengths.cuda()
inner_dims = packed_tensor.size()[1:]
stride = lengths.max().item()
offsets = torch.cumsum(lengths, dim=0) - lengths[0]
padding = torch.zeros(stride, *inner_dims, device=packed_tensor.device, dtype=packed_tensor.dtype)
packed_tensor = torch.cat((packed_tensor, padding))
| view = _create_view(packed_tensor, stride, inner_dims)[offsets] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kyegomez/BitNet
# Path: bitnet/bitlinear.py
class BitLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True):
def forward(self, input):
# Path: bitnet/transformer.py
class Transformer(nn.Module):
class BitNetTransformer(nn.Module):
def __init__(self, dim: int, heads: int, depth: int, ff_mult=2, *args, **kwargs):
def forward(self, x):
def __init__(
self,
dim: int,
depth: int,
num_tokens: int,
heads=8,
ff_mult=4,
):
def forward(self, x):
# Path: tests/tests.py
import pytest
import torch
from torch.nn import functional as F
from bitnet.bitlinear import BitLinear, absmax_quantize
from bitnet.transformer import BitNetTransformer, ParallelTransformerBlock, Transformer
)
def test_bitlinear_shapes(in_features, out_features):
layer = BitLinear(in_features, out_features)
assert layer.weight.shape == (out_features, in_features)
@pytest.mark.parametrize("groups", [1, 2, 5])
def test_bitlinear_groups(groups):
layer = BitLinear(10, 20, groups=groups)
assert layer.groups == groups
def test_bitlinear_reset_parameters():
layer = BitLinear(10, 20)
original_weights = layer.weight.clone()
layer.reset_parameters()
assert not torch.equal(original_weights, layer.weight)
@pytest.mark.parametrize("groups", [1, 2, 5])
def test_bitlinear_forward_with_groups(random_tensor, groups):
layer = BitLinear(10, 20, groups=groups)
output = layer(random_tensor)
assert output.shape == (5, 20)
def test_bitlinear_zero_input():
layer = BitLinear(10, 20)
input_tensor = torch.zeros(5, 10)
output = layer(input_tensor)
assert torch.allclose(output, torch.zeros(5, 20), atol=1e-2)
def test_bitlinear_weight_sign():
layer = BitLinear(10, 20)
input_tensor = torch.randn(5, 10)
output_before = layer(input_tensor)
layer.weight.data = torch.abs(layer.weight.data)
output_after = layer(input_tensor)
assert not torch.allclose(output_before, output_after)
@pytest.mark.parametrize("groups", [1, 2, 5])
def test_bitlinear_weight_group_normalization(groups):
layer = BitLinear(10, 20, groups=groups)
weight = layer.weight.view(groups, -1)
mean = weight.mean(dim=1, keepdim=True)
assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-2)
def test_bitlinear_weight_group_scaling():
layer = BitLinear(10, 20, groups=5)
weight = layer.weight.view(layer.groups, -1)
beta = torch.abs(weight).sum(dim=1, keepdim=True) / (
weight.shape[0] * weight.shape[1]
)
scaled_weight = weight * beta
assert torch.allclose(scaled_weight, layer.weight.view(20, 10))
def test_bitlinear_input_quantization(random_tensor):
layer = BitLinear(10, 20)
quant_input, _ = absmax_quantize(random_tensor)
output = layer(quant_input.float())
assert output.shape == (5, 20)
# ... Continue adding more tests ...
# - Test the forward pass with extreme input values.
# - Test with different types of input tensors (e.g., int8, float16).
# - Test the forward pass with batch sizes other than 5.
# - Verify that using different initializations produces different results.
# - Test the weight and input interactions during the forward pass.
# - And many more...
# ================================ Transformer with bitlinear ================================
@pytest.fixture
def random_tensor():
"""A fixture to generate a random tensor"""
return torch.randn(32, 512)
@pytest.fixture
def bitnet_model():
"""A fixture to create an instance of BitNetTransformer model"""
return BitNetTransformer(
num_tokens=20000,
dim=512,
depth=6,
dim_head=64,
heads=8,
ff_mult=4,
)
@pytest.mark.parametrize(
"dim, dim_head, heads, ff_mult",
[
(512, 64, 8, 4),
(256, 32, 4, 2),
(128, 16, 2, 1),
],
)
def test_parallel_transformer_block(dim, dim_head, heads, ff_mult, random_tensor):
block = ParallelTransformerBlock(dim, dim_head, heads, ff_mult)
output = block(random_tensor)
assert output.shape == random_tensor.shape
@pytest.mark.parametrize(
"dim, depth, heads, dim_head, ff_mult",
[
(512, 6, 8, 64, 4),
(256, 3, 4, 32, 2),
(128, 2, 2, 16, 1),
],
)
def test_transformer(dim, depth, heads, dim_head, ff_mult, random_tensor):
| transformer = Transformer(dim, depth, heads, dim_head, ff_mult) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TonicAI/tvalmetrics
# Path: tonic_validate/classes/llm_response.py
class LLMResponse(BaseModel):
llm_answer: str
llm_context_list: list[str]
benchmark_item: BenchmarkItem
# Path: tonic_validate/metrics/augmentation_accuracy_metric.py
class AugmentationAccuracyMetric(Metric):
name = "augmentation_accuracy"
def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:
return self.calculate_metric(llm_response, openai_service)[0]
def calculate_metric(
self, llm_response: LLMResponse, openai_service: OpenAIService
) -> Tuple[float, List[bool]]:
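# For every retrieved context, ask the LLM whether the answer contains it; the score is the fraction of contexts judged present in the answer.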
contains_context_list = []
for context in llm_response.llm_context_list:
contains_context_response = answer_contains_context_call(
llm_response.llm_answer, context, openai_service
)
contains_context_list.append(
parse_boolean_response(contains_context_response)
)
score = sum(contains_context_list) / len(contains_context_list)
return (score, contains_context_list)
# Path: tonic_validate/metrics/metric.py
class Metric(ABC):
@property
@abstractmethod
def name(self) -> str:
pass
@abstractmethod
def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:
pass
# Path: tonic_validate/metrics/retrieval_precision_metric.py
class RetrievalPrecisionMetric(Metric):
name = "retrieval_precision"
def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:
return self.calculate_metric(llm_response, openai_service)[0]
def calculate_metric(
self, llm_response: LLMResponse, openai_service: OpenAIService
) -> Tuple[float, List[bool]]:
context_relevant_list = []
for context in llm_response.llm_context_list:
relevance_response = context_relevancy_call(
llm_response.benchmark_item.question, context, openai_service
)
context_relevant_list.append(parse_boolean_response(relevance_response))
score = sum(context_relevant_list) / len(context_relevant_list)
return (score, context_relevant_list)
# Path: tonic_validate/services/openai_service.py
class OpenAIService:
def __init__(self, model: str = "gpt-4-1106-preview") -> None:
self.client = OpenAI()
self.model = model
self.cache: Dict[str, str] = {}
def get_response(
self,
prompt: str,
max_retries: int = 5,
) -> str:
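# Responses are memoized per prompt string; on an API error the request is retried, up to max_retries attempts, before an exception is raised.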
if prompt in self.cache:
return self.cache[prompt]
while max_retries > 0:
try:
completion = self.client.chat.completions.create(
model=self.model,
messages=[
{
"role": "system",
"content": "You are a helpful assistant. Respond using markdown.",
},
{"role": "user", "content": prompt},
],
temperature=0.0,
)
response = completion.choices[0].message.content
if response is None:
raise Exception(
f"Failed to get message response from {self.model}, message does not exist"
)
self.cache[prompt] = response
return response
except Exception as e:
print(e)
max_retries -= 1
raise Exception(
f"Failed to get completion response from {self.model}, max retires hit"
)
# Path: tonic_validate/metrics/augmentation_precision_metric.py
import logging
from typing import List
from tonic_validate.classes.llm_response import LLMResponse
from tonic_validate.metrics.augmentation_accuracy_metric import (
AugmentationAccuracyMetric
)
from tonic_validate.metrics.metric import Metric
from tonic_validate.metrics.retrieval_precision_metric import RetrievalPrecisionMetric
from tonic_validate.services.openai_service import OpenAIService
logger = logging.getLogger()
class AugmentationPrecisionMetric(Metric):
name = "augmentation_precision"
def __init__(self) -> None:
self.augmentation_accuracy = AugmentationAccuracyMetric()
self.retrieval_precision = RetrievalPrecisionMetric()
| def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jhejna/cpl
# Path: research/datasets/replay_buffer/storage.py
def load_data(path: str, exclude_keys: Optional[List[str]]) -> Dict:
def save_data(data: Dict, path: str) -> None:
def get_bytes(buffer: Union[Dict, np.ndarray]) -> int:
def capacity(self):
def size(self):
def starts(self):
def ends(self):
def lengths(self):
def bytes(self):
def save(self, path):
def __getitem__(self, key):
def __getattr__(self, name):
def __contains__(self, key):
def add(self, data):
def extend(self, data):
def __init__(self, buffers: Dict) -> None:
def add(self, data):
def extend(self, data):
def __init__(self, initial_capacity: int = 100, dtype=np.int64):
def _reset(self):
def append(self, value):
def pop(self):
def popleft(self):
def view(self):
def __len__(self):
def first(self):
def last(self):
def __str__(self):
def __init__(self, buffer_space: Union[Dict, gym.spaces.Dict], capacity: Optional[int] = None) -> None:
def _update_markers(self, new_ends: Iterable = ()):
def add(self, data):
def extend(self, data):
class Storage(abc.ABC):
class FixedStorage(Storage):
class NPQueue(object):
class CircularStorage(Storage):
# Path: research/envs/metaworld.py
class MetaWorldSawyerImageWrapper(gym.Wrapper):
def __init__(self, env, width=64, height=64, camera="corner2", show_goal=False):
assert isinstance(
env.unwrapped, MetaWorldSawyerEnv
), "MetaWorld Wrapper must be used with a MetaWorldSawyerEnv class"
super().__init__(env)
self._width = width
self._height = height
self._camera = camera
self._show_goal = show_goal
shape = (3, self._height, self._width)
self.observation_space = gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
def _get_image(self):
if not self._show_goal:
try:
self.env.unwrapped._set_pos_site("goal", np.inf * self.env.unwrapped._target_pos)
except ValueError:
pass # If we don't have the goal site, just continue.
img = self.env.render(mode="rgb_array", camera_name=self._camera, width=self._width, height=self._height)
return img.transpose(2, 0, 1)
def step(self, action):
state_obs, reward, done, info = self.env.step(action)
# Throw away the state-based observation.
info["state"] = state_obs
return self._get_image().copy(), reward, done, info
def reset(self):
# Zoom in camera corner2 to make it better for control
# I found this view to work well across a lot of the tasks.
camera_name = "corner2"
# Original XYZ is 1.3 -0.2 1.1
index = self.model.camera_name2id(camera_name)
self.model.cam_fovy[index] = 20.0 # FOV
self.model.cam_pos[index][0] = 1.5 # X
self.model.cam_pos[index][1] = -0.35 # Y
self.model.cam_pos[index][2] = 1.1 # Z
self.env.reset()
return self._get_image().copy() # Return the image observation
# Path: scripts/render_metaworld_dataset.py
import argparse
import io
import gym
import numpy as np
from research.datasets.replay_buffer import storage
from research.envs.metaworld import MetaWorldSawyerImageWrapper
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, required=True, help="Path to the dataset")
parser.add_argument("--output", type=str, required=True, help="Path to output the new dataset")
parser.add_argument("--resolution", type=int, default=64, help="Resolution to render")
parser.add_argument("--env", type=str, required=True)
args = parser.parse_args()
data = storage.load_data(args.path, exclude_keys=["mask"])
assert "state" in data
env = gym.make(args.env)
| env = MetaWorldSawyerImageWrapper(env, width=args.resolution, height=args.resolution) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nbasyl/LLM-FP4
# Path: lm_eval/metrics.py
def mean(arr):
return sum(arr) / len(arr)
# Path: lm_eval/metrics.py
def weighted_perplexity(items):
return math.exp(-weighted_mean(items))
# Path: lm_eval/metrics.py
def weighted_mean(items):
a, b = zip(*items)
return sum(a) / sum(b)
# Path: lm_eval/metrics.py
def bits_per_byte(items):
return -weighted_mean(items) / math.log(2)
# Path: lm_eval/utils.py
class ExitCodeError(Exception):
class MultiChoice:
class Reorderer:
def sh(x):
def escaped_split(text, sep_char, maxsplit=-1):
def simple_parse_args_string(args_string):
def join_iters(iters):
def chunks(iter, n=0, fn=None):
def group(arr, fn):
def _is_json_task(task_name):
def __init__(self, choices):
def __contains__(self, values):
def __iter__(self):
def pattern_match(patterns, source_list):
def general_detokenize(string):
def get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):
def make_disjoint_window(pair):
def select_continuation_from_batch_left_padding(
generations: Union[List[List[int]], torch.Tensor], max_context_size: int
):
def __init__(self, arr, fn):
def get_reordered(self):
def get_original(self, newarr):
def positional_deprecated(fn):
def _wrapper(*args, **kwargs):
def find_test_root(start_path: pathlib.Path) -> pathlib.Path:
def run_task_tests(task_list: List[str]):
def clear_torch_cache():
# Path: lm_eval/base.py
import abc
import numpy as np
import random
import re
import os
import json
import hashlib
import datasets
import torch
import torch.nn.functional as F
import warnings
from typing import Iterable
from sqlitedict import SqliteDict
from tqdm import tqdm
from accelerate import find_executable_batch_size
from lm_eval.metrics import mean, weighted_perplexity, weighted_mean, bits_per_byte
from lm_eval import utils
from abc import abstractmethod
class LM(abc.ABC):
def __init__(self):
self.cache_hook = CacheHook(None)
@abstractmethod
def loglikelihood(self, requests):
"""Compute log-likelihood of generating a continuation from a context.
Downstream tasks should attempt to use loglikelihood instead of other
LM calls whenever possible.
:param requests: list
A list of pairs (context, continuation)
context: str
Context string. Implementations of LM must be able to handle an
empty context string.
continuation: str
The continuation over which log likelihood will be calculated. If
there is a word boundary, the space should be in the continuation.
For example, context="hello" continuation=" world" is correct.
:return: list
A list of pairs (logprob, isgreedy)
logprob: float
The log probability of `continuation`
isgreedy:
Whether `continuation` would be generated by greedy sampling from `context`
"""
pass
@abstractmethod
def loglikelihood_rolling(self, requests):
"""Compute full log-likelihood of a string, with no truncation, for perplexity computation
- We will use the full max context length of the model.
- For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
the max context length.
- IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
which may simply concatenate multiple documents together.
- IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
multiple chunks, the last input will still have a full-sized context.
Example:
Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
Prefix: EOT
Max context length: 4
Resulting input/prediction pairs:
INPUT: EOT 0 1 2
PRED: 0 1 2 3
INPUT: 3 4 5 6
PRED: 4 5 6 7
INPUT: 5 6 7 8
PRED: 8 9
Observe that:
1. Each token is predicted exactly once
2. For the last pair, we provide the full context, but only score the last two tokens
:param requests: list
A list of strings
string: str
String for which we are computing per-token loglikelihood
:return: list
A list of pairs (logprob, isgreedy)
logprob: float
The log probability of `continuation`
isgreedy:
Whether `continuation` would be generated by greedy sampling from `context`
"""
pass
# TODO: Add an optional max length
@abstractmethod
def greedy_until(self, requests):
"""Generate greedily until a stopping sequence
:param requests: list
A list of pairs (context, until)
context: str
Context string
until: [str]
The string sequences to generate until. These string sequences
may each span across multiple tokens, or may be part of one token.
:return: list
A list of strings continuation
continuation: str
The generated continuation.
"""
pass
@classmethod
def create_from_arg_string(cls, arg_string, additional_config=None):
additional_config = {} if additional_config is None else additional_config
| args = utils.simple_parse_args_string(arg_string) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: alextamkin/generative-elicitation
# Path: utils.py
@retry(wait=wait_random_exponential(min=1, max=60))
def query_api(messages, engine, openai_cache=None, openai_cache_file=None, **kwargs):
'''Queries the OpenAI API with the given messages.
NOTE: This function mutates the messages list to add the new_message and the response from the API.
Args:
messages (list): A list of past messages to send to the API.
openai_cache (dict, optional): The openai cache dict. Stores the API responses to avoid duplicate queries. Defaults to None.
openai_cache_file (str, optional): The path to write the cache entries to. Defaults to None.
Returns:
str: The response from the API.
'''
messages_cache_key = json.dumps(messages)
if openai_cache and messages_cache_key in openai_cache:
response = openai_cache[messages_cache_key]
else:
if "temperature" not in kwargs:
kwargs["temperature"] = 0.0
if engine == "gpt-4" or engine == "gpt-3.5-turbo":
response = openai.ChatCompletion.create(
model=engine,
messages=messages,
**kwargs
)
else:
response = openai.Completion.create(
engine=engine,
prompt=messages[0],
**kwargs
)
save_openai_cache({messages_cache_key: response}, openai_cache, openai_cache_file)
if engine == "gpt-4" or engine == "gpt-3.5-turbo":
response_text = response['choices'][0]['message']['content']
messages.append({'role': 'assistant', 'content': response_text})
else:
response_text = response['choices'][0]['text']
return response_text, response
# Path: utils.py
def load_openai_cache(openai_cache_file):
'''Loads the openai cache file into a dict.
Args:
openai_cache_file (str): The path to the openai cache file.
Returns:
dict: The openai cache dict.
'''
if not openai_cache_file:
return None
openai_cache = {}
if os.path.exists(openai_cache_file):
with open(openai_cache_file) as f:
for line in f:
openai_cache.update(json.loads(line))
return openai_cache
# Path: base_active_learning_agent.py
import json
import re
import textwrap
from abc import ABC, abstractmethod
from utils import query_api, load_openai_cache
from sklearn.metrics import roc_auc_score
class BaseActiveLearningAgent(ABC):
def __init__(self, target_specification_file, engine, openai_cache_file=None, **kwargs):
self.get_gold_domain_info(target_specification_file)
self.engine = engine
self.openai_cache_file = openai_cache_file
self.openai_cache = load_openai_cache(openai_cache_file)
self.temperature = kwargs.get("temperature", 0.0)
self.interaction_history = []
def get_gold_domain_info(self, target_specification_file):
'''Gets the gold domain specification that the model should try to learn and other associated information.
'''
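# Every key in the gold specification JSON becomes an attribute on the agent; the gold regex, if present, is also compiled for validating test cases.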
gold_task = json.load(open(target_specification_file)) #"sample_tests.json"
for key in gold_task:
setattr(self, key, gold_task[key])
if key == "regex":
self.gold_regex_text = self.regex
self.gold_regex = re.compile(self.gold_regex_text)
self.persona_text = self.persona
def get_task_description(self):
return "validate an email address adheres to a specific format"
@staticmethod
def format_questions_and_answers(questions_and_answers):
'''Formats the questions and answers into a string.
Looks like:
- Should the system allow numbers in the domain? -> Yes
Args:
questions_and_answers (list): A list of tuples of the form (question, answer).
Returns:
str: The formatted questions and answers.
'''
return '\n'.join([f"- {question} -> {answer}" for question, answer in questions_and_answers])
def get_test_case_prompt(self, interaction_history, test_case):
hypothesis_prompt = textwrap.dedent('''\
{single_instance_prompt1}
{previous_examples}
{single_instance_prompt2}
{test_case}
'''
).format(
single_instance_prompt1=self.test_case_prompt[0],
previous_examples=self.format_questions_and_answers(interaction_history),
single_instance_prompt2=self.test_case_prompt[1],
test_case=test_case,
)
return [{"role": "user", "content": hypothesis_prompt}]
def generate_test_case_answer(self, test_case):
test_case_messages = self.get_test_case_prompt(self.interaction_history, test_case)
| test_case_answer, _ = query_api(test_case_messages, self.engine, self.openai_cache, self.openai_cache_file) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bcmi/libcom
# Path: libcom/utils/model_download.py
def download_pretrained_model(weight_path):
if os.path.exists(weight_path):
assert os.path.isfile(weight_path), weight_path
return weight_path
else:
weight_path= os.path.abspath(weight_path)
model_name = os.path.basename(weight_path)
save_dir = os.path.dirname(weight_path)
download_file_from_network(model_name, save_dir)
print('Pretrained model has been stored to ', weight_path)
return weight_path
# Path: libcom/harmony_score/source/bargainnet.py
class StyleEncoder(nn.Module):
def __init__(self, style_dim, norm_layer=nn.BatchNorm2d):
super(StyleEncoder, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
ndf=64
n_layers=6
kw = 3
padw = 0
self.conv1f = PartialConv2d(3, ndf, kernel_size=kw, stride=2, padding=padw)
self.relu1 = nn.ReLU(True)
nf_mult = 1
nf_mult_prev = 1
n = 1
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
self.conv2f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)
self.norm2f = norm_layer(ndf * nf_mult)
self.relu2 = nn.ReLU(True)
n = 2
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
self.conv3f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)
self.norm3f = norm_layer(ndf * nf_mult)
self.relu3 = nn.ReLU(True)
n = 3
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
self.conv4f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)
self.norm4f = norm_layer(ndf * nf_mult)
self.relu4 = nn.ReLU(True)
n = 4
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
self.conv5f = PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)
self.avg_pooling = nn.AdaptiveAvgPool2d(1)
self.convs = nn.Conv2d(ndf * nf_mult, style_dim, kernel_size=1, stride=1)
def forward(self, input, mask):
"""Standard forward."""
xb = input
mb = mask
xb, mb = self.conv1f(xb, mb)
xb = self.relu1(xb)
xb, mb = self.conv2f(xb, mb)
xb = self.norm2f(xb)
xb = self.relu2(xb)
xb, mb = self.conv3f(xb, mb)
xb = self.norm3f(xb)
xb = self.relu3(xb)
xb, mb = self.conv4f(xb, mb)
xb = self.norm4f(xb)
xb = self.relu4(xb)
xb, mb = self.conv5f(xb, mb)
xb = self.avg_pooling(xb)
s = self.convs(xb)
s = torch.squeeze(s)
return s
# Path: libcom/harmony_score/harmony_score_prediction.py
import torch
import torchvision
import torch
import os
import torchvision.transforms as transforms
import math
from libcom.utils.model_download import download_pretrained_model
from libcom.utils.process_image import *
from libcom.utils.environment import *
from libcom.harmony_score.source.bargainnet import StyleEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
model_set = ['BargainNet']
class HarmonyScoreModel:
"""
Harmony score prediction model.
Args:
device (str | torch.device): gpu id
model_type (str): predefined model type.
kwargs (dict): other parameters for building model
Examples:
>>> from libcom import HarmonyScoreModel
>>> from libcom.utils.process_image import make_image_grid
>>> import cv2
>>> net = HarmonyScoreModel(device=0, model_type='BargainNet')
>>> test_dir = '../tests/harmony_score_prediction/'
>>> img_names = ['vaulted-cellar-247391_inharm.jpg', 'ameland-5651866_harm.jpg']
>>> vis_list,scores = [], []
>>> for img_name in img_names:
>>> comp_img = test_dir + 'composite/' + img_name
>>> comp_mask = test_dir + 'composite_mask/' + img_name
>>> score = net(comp_img, comp_mask)
>>> vis_list += [comp_img, comp_mask]
>>> scores.append(score)
>>> grid_img = make_image_grid(vis_list, text_list=[f'harmony_score:{scores[0]:.2f}', 'composite-mask', f'harmony_score:{scores[1]:.2f}', 'composite-mask'])
>>> cv2.imwrite('../docs/_static/image/harmonyscore_result1.jpg', grid_img)
Expected result:
.. image:: _static/image/harmonyscore_result1.jpg
:scale: 38 %
"""
def __init__(self, device=0, model_type='BargainNet', **kwargs):
assert model_type in model_set, f'Not implementation for {model_type}'
self.model_type = model_type
self.option = kwargs
weight_path = os.path.join(cur_dir, 'pretrained_models', 'BargainNet.pth')
| download_pretrained_model(weight_path) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pgorecki/lato
# Path: lato/dependency_provider.py
class SimpleDependencyProvider(DependencyProvider):
"""
A dependency provider that manages dependencies and helps in automatic
dependency injection based on type or parameter name.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the DependencyProvider.
:param args: Class instances to be registered by types
:param kwargs: Dependencies to be registered by types and with explicit names
"""
self._dependencies = {}
self.update(*args, **kwargs)
def register_dependency(self, identifier: str | type, dependency: Any):
"""
Register a dependency with a given identifier (name or type).
:param identifier: The name or type to be used as an identifier for the dependency
:param dependency: The actual dependency
"""
if isinstance(identifier, type):
self._dependencies[identifier] = dependency
self._dependencies[identifier] = dependency
def has_dependency(self, identifier: str | type) -> bool:
"""
Check if a dependency with the given identifier exists.
:param identifier: Identifier for the dependency
:return: True if the dependency exists, otherwise False
"""
return identifier in self._dependencies
def get_dependency(self, identifier: str | type) -> Any:
"""
Retrieve a dependency using its identifier (name or type).
:param identifier: Identifier for the dependency
:return: The associated dependency
"""
try:
return self._dependencies[identifier]
except KeyError as e:
raise UnknownDependencyError(identifier)
def copy(self, *args, **kwargs) -> DependencyProvider:
"""
Create a copy of self with updated dependencies.
:param args: typed overrides
:param kwargs: named overrides
:return: A copy of the dependency provider
"""
dp = SimpleDependencyProvider()
dp._dependencies.update(self._dependencies)
dp.update(*args, **kwargs)
return dp
# Path: lato/dependency_provider.py
def as_type(obj: Any, cls: type) -> TypedDependency:
return TypedDependency(obj, cls)
# Path: lato/dependency_provider.py
def get_function_parameters(func) -> OrderedDict:
"""
Retrieve the function's parameters and their annotations.
:param func: The function to inspect
:return: An ordered dictionary of parameter names to their annotations
"""
handler_signature = inspect.signature(func)
kwargs_iterator = iter(handler_signature.parameters.items())
parameters = OrderedDict()
for name, param in kwargs_iterator:
parameters[name] = param.annotation
return parameters
# Path: tests/test_dependency_provider.py
import abc
from lato.dependency_provider import (
SimpleDependencyProvider,
as_type,
get_function_parameters,
)
class FooService:
...
def foo(a: int, b: str, c: FooService):
...
def test_create_provider_with_types():
foo_service = FooService()
dp = SimpleDependencyProvider(foo_service=foo_service)
assert dp[FooService] is foo_service
assert dp["foo_service"] is foo_service
def test_create_provider_with_primitive_kwarg():
dp = SimpleDependencyProvider(x=1)
assert dp["x"] == 1
def test_create_provider_with_class_instance_arg():
service = FooService()
dp = SimpleDependencyProvider(service)
assert dp[FooService] is service
def test_create_provider_with_class_instance_karg():
service = FooService()
dp = SimpleDependencyProvider(service=service)
assert dp[FooService] is service
assert dp["service"] is service
def test_create_provider_with_class_instance_arg_and_kwarg_gets_overridden():
service1 = FooService()
service2 = FooService()
dp = SimpleDependencyProvider(service1, service=service2)
assert dp[FooService] is service2
assert dp["service"] is service2
def test_resolve_custom_primitive_type():
class Email(str):
...
email = Email("john@example.com")
dp = SimpleDependencyProvider(email=email)
assert dp[Email] == email
def test_get_function_parameters():
| params = get_function_parameters(foo) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: instadeepai/flashbax
# Path: flashbax/buffers/flat_buffer.py
class ExperiencePair(NamedTuple, Generic[Experience]):
class TransitionSample(Generic[Experience]):
def validate_sample_batch_size(sample_batch_size: int, max_length: int):
def validate_min_length(min_length: int, add_batch_size: int, max_length: int):
def validate_max_length_add_batch_size(max_length: int, add_batch_size: int):
def validate_flat_buffer_args(
max_length: int,
min_length: int,
sample_batch_size: int,
add_batch_size: int,
):
def create_flat_buffer(
max_length: int,
min_length: int,
sample_batch_size: int,
add_sequences: bool,
add_batch_size: Optional[int],
) -> TrajectoryBuffer:
def sample_fn(state: TrajectoryBufferState, rng_key: PRNGKey) -> TransitionSample:
def make_flat_buffer(
max_length: int,
min_length: int,
sample_batch_size: int,
add_sequences: bool = False,
add_batch_size: Optional[int] = None,
) -> TrajectoryBuffer:
# Path: flashbax/buffers/conftest.py
def get_fake_batch(fake_transition: chex.ArrayTree, batch_size) -> chex.ArrayTree:
"""Create a fake batch with differing values for each transition."""
return jax.tree_map(
lambda x: jnp.stack([x + i for i in range(batch_size)]), fake_transition
)
# Path: flashbax/conftest.py
_DEVICE_COUNT_MOCK = 2
# Path: flashbax/buffers/flat_buffer_test.py
from copy import deepcopy
from flashbax.buffers import flat_buffer
from flashbax.buffers.conftest import get_fake_batch
from flashbax.conftest import _DEVICE_COUNT_MOCK
import chex
import jax
import jax.numpy as jnp
import pytest
# Copyright 2023 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_add_and_can_sample(
fake_transition: chex.ArrayTree,
min_length: int,
max_length: int,
add_batch_size: int,
) -> None:
"""Check the `add` function by filling the buffer all
the way to the max_length and checking that it produces the expected behaviour.
"""
| fake_batch = get_fake_batch(fake_transition, add_batch_size) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TheDuckAI/DuckTrack
# Path: ducktrack/keycomb.py
class KeyCombinationListener:
"""
Simple and bad key combination listener.
"""
def __init__(self):
self.current_keys = set()
self.callbacks = {}
self.listener = Listener(on_press=self.on_key_press, on_release=self.on_key_release)
def add_comb(self, keys, callback):
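# Key names are sorted before conversion so the same combination always maps to the same dictionary key, regardless of the order given.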
self.callbacks[tuple([name_to_key(key_name) for key_name in sorted(keys)])] = callback
def on_key_press(self, key):
self.current_keys.add(key)
for comb, callback in self.callbacks.items():
if all(k in self.current_keys for k in comb):
return callback()
def on_key_release(self, key):
if key in self.current_keys:
self.current_keys.remove(key)
def start(self):
self.listener.start()
def stop(self):
self.listener.stop()
# Path: ducktrack/util.py
def fix_windows_dpi_scaling():
"""
Fixes DPI scaling issues with legacy windows applications
Reference: https://pynput.readthedocs.io/en/latest/mouse.html#ensuring-consistent-coordinates-between-listener-and-controller-on-windows
"""
import ctypes
PROCESS_PER_MONITOR_DPI_AWARE = 2
ctypes.windll.shcore.SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE)
# Path: ducktrack/util.py
def get_recordings_dir() -> str:
documents_folder = Path.home() / 'Documents' / 'DuckTrack_Recordings'
return str(documents_folder)
# Path: ducktrack/util.py
def name_to_button(name: str) -> Button:
return getattr(Button, name)
# Path: ducktrack/util.py
def name_to_key(name: str) -> Key | KeyCode:
try:
return getattr(Key, name)
except AttributeError:
return KeyCode.from_char(name)
# Path: ducktrack/playback.py
import json
import math
import os
import sys
import time
import pyautogui
from pynput.keyboard import Controller as KeyboardController
from pynput.keyboard import Key
from pynput.mouse import Button
from pynput.mouse import Controller as MouseController
from .keycomb import KeyCombinationListener
from .util import (fix_windows_dpi_scaling, get_recordings_dir, name_to_button,
name_to_key)
pyautogui.PAUSE = 0
pyautogui.DARWIN_CATCH_UP_TIME = 0
class Player:
"""
Plays back recordings.
"""
def __init__(self):
self.stop_playback = False
self.listener = KeyCombinationListener()
def stop_comb_pressed():
self.stop_playback = True
return False
self.listener.add_comb(("shift", "esc"), stop_comb_pressed)
self.listener.start()
def play(self, recording_path: str):
with open(os.path.join(recording_path, "events.jsonl"), "r") as f:
events = [json.loads(line) for line in f.readlines()]
with open(os.path.join(recording_path, "metadata.json"), "r") as f:
metadata = json.load(f)
self.playback(events, metadata)
def playback(self, events: list[dict], metadata: dict):
if metadata["system"] == "Windows":
fix_windows_dpi_scaling()
mouse_controller = MouseController()
keyboard_controller = KeyboardController()
if not events:
self.listener.stop()
return
presses_to_skip = 0
releases_to_skip = 0
in_click_sequence = False
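# presses_to_skip / releases_to_skip let the main loop pass over events already consumed when a double or triple click was replayed; in_click_sequence tracks whether such a sequence is in progress.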
for i, event in enumerate(events):
start_time = time.perf_counter()
if self.stop_playback:
return
def do_mouse_press(button):
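# Look ahead in the recorded events: presses that land within 0.5 s and a few pixels of this one are replayed as a double or triple click; returns how many of the following presses and releases to skip.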
for j, second_event in enumerate(events[i+1:]):
# make sure the time between mouse clicks is less than 500ms
if second_event["time_stamp"] - event["time_stamp"] > 0.5:
break
if "x" in second_event and "y" in second_event:
# if the mouse moves out of the click radius/rectangle, it is not a click sequence
if math.sqrt((second_event["y"] - event["y"]) ** 2 +
(second_event["x"] - event["x"]) ** 2) > 4:
break
if second_event["action"] == "click" and second_event["pressed"]:
for k, third_event in enumerate(events[i+j+2:]):
if third_event["time_stamp"] - second_event["time_stamp"] > 0.5:
break
if "x" in third_event and "y" in third_event:
if math.sqrt((third_event["y"] - event["y"]) ** 2 +
(third_event["x"] - event["x"]) ** 2) > 5:
break
if third_event["action"] == "click" and third_event["pressed"]:
mouse_controller.click(button, 3)
return 2, 2
mouse_controller.click(button, 2)
return 1, 1
mouse_controller.press(button)
return 0, 0
if event["action"] == "move":
mouse_controller.position = (event["x"], event["y"])
elif event["action"] == "click":
button = name_to_button(event["button"])
if event["pressed"]:
if presses_to_skip == 0:
presses, releases = do_mouse_press(button)
presses_to_skip += presses
releases_to_skip += releases
if presses > 0:
in_click_sequence = True
else:
presses_to_skip -= 1
else:
if releases_to_skip == 0:
mouse_controller.release(button)
if in_click_sequence:
keyboard_controller.press(Key.shift)
mouse_controller.click(Button.left)
keyboard_controller.release(Key.shift)
in_click_sequence = False
else:
releases_to_skip -= 1
elif event["action"] == "scroll":
if metadata["system"] == "Windows":
# for some reason on windows, pynput scroll is correct but pyautogui is not
mouse_controller.scroll(metadata["scroll_direction"] * event["dx"], metadata["scroll_direction"] * event["dy"])
else:
pyautogui.hscroll(clicks=metadata["scroll_direction"] * event["dx"])
pyautogui.vscroll(clicks=metadata["scroll_direction"] * event["dy"])
elif event["action"] in ["press", "release"]:
| key = name_to_key(event["name"]) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: e4s2023/E4S2023
# Path: swap_face_fine/face_vid2vid/modules/util.py
class AntiAliasInterpolation2d(nn.Module):
"""
Band-limited downsampling, for better preservation of the input signal.
"""
def __init__(self, channels, scale):
super(AntiAliasInterpolation2d, self).__init__()
sigma = (1 / scale - 1) / 2
kernel_size = 2 * round(sigma * 4) + 1
self.ka = kernel_size // 2
self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka
kernel_size = [kernel_size, kernel_size]
sigma = [sigma, sigma]
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
self.scale = scale
inv_scale = 1 / scale
self.int_inv_scale = int(inv_scale)
def forward(self, input):
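# Pad, blur with the depthwise Gaussian kernel, then subsample by the integer inverse scale; scale == 1.0 is a pass-through.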
if self.scale == 1.0:
return input
out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
out = F.conv2d(out, weight=self.weight, groups=self.groups)
out = out[:, :, ::self.int_inv_scale, ::self.int_inv_scale]
return out
# Path: swap_face_fine/face_vid2vid/modules/util.py
def make_coordinate_grid_2d(spatial_size, type):
"""
Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
"""
h, w = spatial_size
x = torch.arange(w).type(type)
y = torch.arange(h).type(type)
x = (2 * (x / (w - 1)) - 1)
y = (2 * (y / (h - 1)) - 1)
yy = y.view(-1, 1).repeat(1, w)
xx = x.view(1, -1).repeat(h, 1)
meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
return meshed
# Path: swap_face_fine/face_vid2vid/modules/model.py
from torch import nn
from swap_face_fine.face_vid2vid.modules.util import AntiAliasInterpolation2d, make_coordinate_grid_2d
from torchvision import models
from torch.autograd import grad
from torchvision import transforms
import torch
import torch.nn.functional as F
import numpy as np
import swap_face_fine.face_vid2vid.modules.hopenet as hopenet
class Vgg19(torch.nn.Module):
"""
Vgg19 network for perceptual loss.
"""
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
self.mean = torch.nn.Parameter(data=torch.Tensor(np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))),
requires_grad=False)
self.std = torch.nn.Parameter(data=torch.Tensor(np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))),
requires_grad=False)
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
X = (X - self.mean) / self.std
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class ImagePyramide(torch.nn.Module):
"""
Create image pyramide for computing pyramide perceptual loss.
"""
def __init__(self, scales, num_channels):
super(ImagePyramide, self).__init__()
downs = {}
for scale in scales:
| downs[str(scale).replace('.', '-')] = AntiAliasInterpolation2d(num_channels, scale) |
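A small usage sketch for the AntiAliasInterpolation2d module defined above, assuming that class is importable as written; the batch size, channel count and scale are arbitrary.

import torch

# Downsample a random batch of RGB images by 4x with the band-limited filter above.
down = AntiAliasInterpolation2d(channels=3, scale=0.25)
x = torch.randn(2, 3, 256, 256)
y = down(x)
print(y.shape)  # torch.Size([2, 3, 64, 64])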
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: riverscn/epghub
# Path: epg/utils.py
def load_config(path: str) -> list[Channel]:
def scrap_channel(
channel: Channel, channels_config, date: date = datetime.today().date()
) -> bool:
def copy_channels(
channels: list[Channel], new_channels: list[Channel]
) -> tuple[int, set]:
def update_preview(channel: Channel) -> int:
def update_recap(channel: Channel) -> int:
def update_channel_full(channel, num_refresh_channels):
def _update_recap(channel):
def _update_preview(channel):
# Path: epg/generator/xmltv.py
def write(filepath: str, channels: list[Channel], info: str = "") -> bool:
# Path: epg/generator/diyp.py
def write(dir: str, channels: list[Channel]) -> bool:
# Path: epg/scraper/__xmltv.py
def get_channels(xmltv_url: str, dtd: etree.DTD | None = None) -> list[Channel]:
# Path: main.py
from jinja2 import Environment, FileSystemLoader
from epg import utils
from epg.generator import xmltv
from epg.generator import diyp
from epg.scraper import __xmltv
from lxml import etree
from datetime import datetime, timezone
from croniter import croniter
import os
import shutil
CF_PAGES = os.getenv("CF_PAGES")
CF_PAGES_URL = os.getenv("CF_PAGES_URL")
DEPLOY_HOOK = os.getenv("DEPLOY_HOOK")
CLOUDFLARE_API_TOKEN = os.getenv("CLOUDFLARE_API_TOKEN")
XMLTV_URL = os.getenv("XMLTV_URL", "")
TZ = os.getenv("TZ")
if TZ == None:
print(
"!!!Please set TZ environment variables to define timezone or it will use system timezone by default!!!"
)
CRON_TRIGGER = os.getenv("CRON_TRIGGER", "0 0 * * *")
next_cron_time = (
croniter(CRON_TRIGGER, datetime.now(timezone.utc))
.get_next(datetime)
.replace(tzinfo=timezone.utc)
.astimezone()
)
dtd = etree.DTD(open("xmltv.dtd", "r"))
now = datetime.now()
current_timezone = now.astimezone().tzinfo
timezone_name = current_timezone.tzname(now)
timezone_offset = now.astimezone().strftime("%z")
print("use timezone:", timezone_name, f"UTC{timezone_offset}", flush=True)
config_path = os.path.join(os.getcwd(), "config", "channels.yaml")
epg_path = os.path.join(os.getcwd(), "web", "epg.xml")
if not os.path.exists(os.path.join(os.getcwd(), "web")):
os.mkdir(os.path.join(os.getcwd(), "web"))
channels = utils.load_config(config_path)
if XMLTV_URL == "":
xml_channels = []
print("!!!Please set XMLTV_URL environment variables to reuse XML!!!")
else:
print("reuse XML:", XMLTV_URL, flush=True)
| xml_channels = __xmltv.get_channels(XMLTV_URL, dtd) |
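The next-run computation above can be exercised on its own; this sketch repeats the same croniter chain with a hard-coded expression instead of the CRON_TRIGGER environment variable.

from datetime import datetime, timezone
from croniter import croniter

# Next run of a daily-midnight cron schedule, computed in UTC and shown in local time.
cron_expr = "0 0 * * *"
next_run = (
    croniter(cron_expr, datetime.now(timezone.utc))
    .get_next(datetime)
    .replace(tzinfo=timezone.utc)
    .astimezone()
)
print(next_run.isoformat())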
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lancopku/label-words-are-anchors
# Path: icl/utils/data_wrapper.py
def sst2_wrap_data(demonstrations, input_sample, label_dict):
def trec_wrap_data(demonstrations, input_sample, label_dict):
def emo_wrap_data(demonstrations, input_sample, label_dict):
def agnews_wrap_data(demonstrations, input_sample, label_dict):
def wrap_data(demonstrations, input_sample, label_dict, task_name):
def instruct_wrapper(instruct: str, input_sample, label_dict, task_name):
def wrap_dataset(dataset: datasets.arrow_dataset.Dataset, demonstration, label_dict, task_name):
def wrap(example):
def wrap_dataset_with_instruct(dataset: datasets.arrow_dataset.Dataset, instruct, label_dict,
task_name):
def wrap(example):
def get_max_length(tokenizer):
def tokenize_dataset(dataset, tokenizer):
def tokenize_function(examples):
def remove_str_columns(dataset):
# Path: icl/utils/other.py
class TensorStrFinder:
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def find_tensor_in_tensor(self, a_tensor: Union[torch.Tensor, list], b_tensor: torch.Tensor,
return_mask=True, match_before: Optional[int] = None):
if len(b_tensor.shape) == 2:
assert b_tensor.shape[0] == 1
b_tensor = b_tensor[0]
if isinstance(a_tensor, list):
a_tensor = torch.tensor(a_tensor)
if a_tensor.device != b_tensor.device:
a_tensor = a_tensor.to(b_tensor.device)
window_size = len(a_tensor)
b_windows = b_tensor.unfold(0, window_size, 1)
matches = torch.all(b_windows == a_tensor, dim=1)
positions = torch.nonzero(matches, as_tuple=True)[0]
if return_mask:
mask = torch.zeros_like(b_tensor, dtype=torch.bool)
for pos in positions:
if match_before is None or pos + window_size <= match_before:
mask[pos:pos + window_size] = True
return mask
return positions
def find_str_in_tensor(self, s: str, t: torch.Tensor, return_mask=True, match_before=None):
s_tokens = self.tokenizer.encode(s, add_special_tokens=False)
s_tensor = torch.LongTensor(s_tokens)
return self.find_tensor_in_tensor(s_tensor, t, return_mask=return_mask,
match_before=match_before)
def get_strs_mask_in_tensor(self, list_s: List[str], t: torch.Tensor, match_before=None):
list_s_tokens = [self.tokenizer.encode(s, add_special_tokens=False) for s in list_s]
list_s_tensor = [torch.LongTensor(s_tokens) for s_tokens in list_s_tokens]
mask_tensor_list = [
self.find_tensor_in_tensor(s_tensor, t, return_mask=True, match_before=match_before) for
s_tensor in list_s_tensor]
mask_tensor = functools.reduce(torch.logical_or, mask_tensor_list)
return mask_tensor
# Path: icl/util_classes/context_solver.py
import warnings
import torch
from copy import deepcopy
from ..utils.data_wrapper import format_s_dict
from ..utils.other import TensorStrFinder
class ContextSolver:
def __init__(self, task_name, tokenizer=None):
assert task_name in ['sst2', 'trec', 'agnews', 'emo']
self.task_name = task_name
self.tokenizer = tokenizer
self.format_s = format_s_dict[task_name]
self.parse_format_s()
def parse_format_s(self):
self.X_prefix = self.format_s.split('\n')[0].split(':')[0] + ':'
self.Y_prefix = self.format_s.split('\n')[1].split(':')[0] + ':'
def get_empty_demo_context(self, context: str, only_demo_part=True):
context = context.split('\n')
for i, line in enumerate(context[:-2]):
if self.X_prefix in line:
line = self.X_prefix
elif self.Y_prefix in line:
line = line
else:
                warnings.warn('Global prefix or other str exists!')
context[i] = line
if only_demo_part:
context = context[:-2]
context = '\n'.join(context)
return context
def get_mask_strings_and_match_before(self, context, input_ids, tokenizer=None):
if tokenizer is None:
tokenizer = self.tokenizer
poss = torch.where(input_ids == tokenizer.encode('\n', add_special_tokens=False)[0])[0]
if len(poss) >= 2:
match_before = poss[-2] + 1
else:
match_before = None
list_s = []
list_s.append(self.X_prefix)
list_s.append('\n' + self.X_prefix)
context = context.split('\n')
for i, line in enumerate(context[:-2]):
if self.X_prefix in line:
pass
elif self.Y_prefix in line:
list_s.append('\n' + line)
list_s.append('\n' + line + '\n')
else:
                warnings.warn('Global prefix or other str exists!')
return list_s, match_before
def get_mask(self, input_ids, tokenizer=None):
if isinstance(input_ids, list):
input_ids = torch.tensor(input_ids)
if len(input_ids.shape) == 2:
assert input_ids.shape[0] == 1
input_ids = input_ids[0]
if tokenizer is None:
tokenizer = self.tokenizer
context = tokenizer.decode(input_ids)
list_s, match_before = self.get_mask_strings_and_match_before(context, input_ids=input_ids,
tokenizer=tokenizer)
| tensor_str_finder = TensorStrFinder(tokenizer=tokenizer) |
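The matching trick inside TensorStrFinder.find_tensor_in_tensor can be shown with plain tensors and no tokenizer; the values below are arbitrary.

import torch

# Slide a window over b with unfold and compare every window against the pattern a.
a = torch.tensor([3, 4])
b = torch.tensor([1, 2, 3, 4, 5, 3, 4])
windows = b.unfold(0, len(a), 1)          # shape (len(b) - len(a) + 1, len(a))
matches = torch.all(windows == a, dim=1)
positions = torch.nonzero(matches, as_tuple=True)[0]
print(positions.tolist())  # [2, 5]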
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Aggify/aggify
# Path: aggify/aggify.py
def last_out_stage_check(method: AggifyType) -> AggifyType:
def decorator(*args, **kwargs):
def __init__(self, base_model: Type[Document]):
def __iter__(self):
def project(self, **kwargs: QueryParams) -> "Aggify":
def group(self, expression: Union[str, Dict, List, None] = "id") -> "Aggify":
def order_by(self, *order_fields: Union[str, List[str]]) -> "Aggify":
def raw(self, raw_query: dict) -> "Aggify":
def add_fields(self, **fields) -> "Aggify": # noqa
def filter(
self, arg: Union[Q, None] = None, **kwargs: Union[QueryParams, F, list]
) -> "Aggify":
def out(self, coll: str, db: Union[str, None] = None) -> "Aggify":
def __to_aggregate(self, query: Dict[str, Any]) -> None:
def __getitem__(self, index: Union[slice, int]) -> "Aggify":
def unwind(
self,
path: str,
include_array_index: Union[str, None] = None,
preserve: bool = False,
) -> "Aggify":
def annotate(
self,
annotate_name: Union[str, None] = None,
accumulator: Union[str, None] = None,
f: Union[Union[str, Dict], F, int, None] = None,
**kwargs,
) -> "Aggify":
def _get_field_type_and_accumulator(
accumulator: str,
) -> Tuple[Type, str]:
def _get_annotate_value(self, f: Union[F, str]) -> Union[Dict, str]:
def _do_annotate_with_expression(
annotate: Dict[str, Dict[str, Any]], base_model_fields: Dict[str, Any]
) -> Tuple[Dict[str, Dict[str, Any]], List[str]]:
def __match(self, matches: Dict[str, Any]):
def __lookup(
from_collection: str, local_field: str, as_name: str, foreign_field: str = "_id"
) -> Dict[str, Dict[str, str]]:
def __combine_sequential_matches(self) -> List[Dict[str, Union[dict, Any]]]:
def get_field_name_recursively(
self, field: str, base: Union[CollectionType, None] = None
) -> str:
def lookup(
self,
from_collection: CollectionType,
as_name: str,
query: Union[List[Q], Union[Q, None], List["Aggify"]] = None,
let: Union[List[str], None] = None,
local_field: Union[str, None] = None,
foreign_field: Union[str, None] = None,
raw_let: Union[Dict, None] = None,
) -> "Aggify":
def get_model_field(model: Type[Document], field: str) -> mongoengine_fields:
def _replace_base(self, embedded_field) -> str:
def replace_root(
self, *, embedded_field: str, merge: Union[Dict, None] = None
) -> "Aggify":
def replace_with(
self, *, embedded_field: str, merge: Union[Dict, None] = None
) -> "Aggify":
def redact(self, value1, condition, value2, then_value, else_value):
def clean_then_else(_then_value, _else_value):
class Aggify:
# Path: aggify/exceptions.py
class InvalidOperator(AggifyBaseException):
def __init__(self, operator: str):
self.message = f"Operator {operator} does not exists, please refer to documentation to see all supported operators."
super().__init__(self.message)
# Path: tests/test_aggify.py
class BaseModel(Document):
# Define your fields here
name = StringField(max_length=100)
age = IntField()
meta = {"allow_inheritance": True, "abstract": True}
# Path: tests/test_q.py
import pytest
from aggify import Q, F, Aggify
from aggify.exceptions import InvalidOperator
from tests.test_aggify import BaseModel
class TestQ:
# Test OR operator with multiple conditions
def test_or_operator_with_multiple_conditions(self):
q1 = Q(name="John")
q2 = Q(name="Alice")
q_combined = q1 | q2
assert dict(q_combined) == {
"$match": {"$or": [dict(q1)["$match"], dict(q2)["$match"]]}
}
def test_or_operator_with_multiple_conditions_more_than_rwo(self):
q1 = Q(name="John")
q2 = Q(name="Alice")
q3 = Q(name="Bob")
q_combined = q1 | q2 | q3
assert dict(q_combined) == {
"$match": {
"$or": [dict(q1)["$match"], dict(q2)["$match"], dict(q3)["$match"]]
}
}
def test_and(self):
q1 = Q(name="Mahdi")
q2 = Q(age__gt=20)
q = q1 & q2
assert dict(q) == {"$match": {"$and": [dict(q1)["$match"], dict(q2)["$match"]]}}
def test_multiple_and(self):
q1 = Q(name="Mahdi")
q2 = Q(age__gt=20)
q3 = Q(age__lt=30)
q = q1 & q2 & q3
assert dict(q) == {
"$match": {
"$and": [dict(q1)["$match"], dict(q2)["$match"], dict(q3)["$match"]]
}
}
# Test combining NOT operators with AND
def test_combine_not_operators_with_and(self):
q1 = Q(name="John")
q2 = Q(age__lt=30)
q_combined = ~q1 & ~q2
assert dict(q_combined) == {
"$match": {
"$and": [{"$not": [dict(q1)["$match"]]}, {"$not": [dict(q2)["$match"]]}]
}
}
# Test combining NOT operators with OR
def test_combine_not_operators_with_or(self):
q1 = Q(name="John")
q2 = Q(age__lt=30)
q_combined = ~q1 | ~q2 # Changed | to combine OR
assert dict(q_combined) == {
"$match": {
"$or": [{"$not": [dict(q1)["$match"]]}, {"$not": [dict(q2)["$match"]]}]
}
}
def test_unsuitable_key_for_f(self):
| with pytest.raises(InvalidOperator): |
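The assertions above only pin down the outer $or/$and nesting of the combined $match stages; this plain-dict sketch shows that target structure. The inner filter documents are illustrative guesses, not what Q(...) actually emits.

# Target shape of the combined stages checked by the tests above.
q_john = {"$match": {"name": "John"}}
q_young = {"$match": {"age": {"$lt": 30}}}

q_or = {"$match": {"$or": [q_john["$match"], q_young["$match"]]}}
q_and = {"$match": {"$and": [q_john["$match"], q_young["$match"]]}}
print(q_or)
print(q_and)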
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sotopia-lab/sotopia
# Path: sotopia/database/persistent_profile.py
class AgentProfile(JsonModel):
first_name: str = Field(index=True)
last_name: str = Field(index=True)
age: int = Field(index=True, default_factory=lambda: 0)
occupation: str = Field(index=True, default_factory=lambda: "")
gender: str = Field(index=True, default_factory=lambda: "")
gender_pronoun: str = Field(index=True, default_factory=lambda: "")
public_info: str = Field(index=True, default_factory=lambda: "")
big_five: str = Field(index=True, default_factory=lambda: "")
moral_values: list[str] = Field(index=False, default_factory=lambda: [])
schwartz_personal_values: list[str] = Field(
index=False, default_factory=lambda: []
)
personality_and_values: str = Field(index=True, default_factory=lambda: "")
decision_making_style: str = Field(index=True, default_factory=lambda: "")
secret: str = Field(default_factory=lambda: "")
model_id: str = Field(default_factory=lambda: "")
# Path: sotopia/database/persistent_profile.py
class RelationshipType(IntEnum):
stranger = 0
know_by_name = 1
acquaintance = 2
friend = 3
romantic_relationship = 4
family_member = 5
# Path: sotopia/envs/parallel.py
def get_bio(
relationship: RelationshipType, profile: AgentProfile, agent_id: int
) -> str:
match relationship:
case RelationshipType.stranger:
return _agent_profile_to_stranger_self(profile, agent_id=agent_id)
case RelationshipType.know_by_name:
return _agent_profile_to_name_self(profile, agent_id=agent_id)
case RelationshipType.acquaintance:
return _agent_profile_to_aquaintance_self(
profile, agent_id=agent_id
)
case RelationshipType.friend | RelationshipType.romantic_relationship | RelationshipType.family_member:
return _agent_profile_to_friendabove_self(
profile, agent_id=agent_id
)
case _:
raise ValueError(f"Unknown relationship {relationship}")
# Path: sotopia/envs/parallel.py
@configurable
def render_text_for_agent(
raw_text: str,
agent_id: int,
tags_to_render: list[str] = [
"extra_info",
"clarification_hint",
"strategy_hint",
],
) -> str:
return XMLRenderer()(
raw_text,
RenderContext(
viewer=f"agent_{agent_id}", tags_to_render=tags_to_render
),
)
# Path: tests/envs/test_get_bio.py
from typing import Any
from sotopia.database.persistent_profile import (
AgentProfile,
RelationshipType,
)
from sotopia.envs.parallel import get_bio, render_text_for_agent
import pytest
@pytest.fixture
def _get_john_profile() -> AgentProfile:
return AgentProfile(
first_name="John",
last_name="Doe",
personality_and_values="I am a big five",
public_info="I am a public info",
secret="I am a secret",
)
def test_get_bio(_get_john_profile: Any) -> None:
john_profile = _get_john_profile
background = get_bio(
| RelationshipType.stranger, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Zai-Kun/reverse-engineered-chatgpt
# Path: re_gpt/errors.py
class BackendError(Exception):
def __init__(self, error_code):
self.error_code = error_code
self.message = (
f"An error occurred on the backend. Error code: {self.error_code}"
)
super().__init__(self.message)
# Path: re_gpt/errors.py
class InvalidSessionToken(Exception):
def __init__(self):
self.message = "Invalid session token provided."
super().__init__(self.message)
# Path: re_gpt/errors.py
class RetryError(Exception):
def __init__(self, website, message="Exceeded maximum retries"):
self.website = website
self.message = f"{message} for website: {website}"
super().__init__(self.message)
# Path: re_gpt/errors.py
class TokenNotProvided(Exception):
def __init__(self):
self.message = "Token not provided. Please pass your '__Secure-next-auth.session-token' as an argument (e.g., ChatGPT.init(session_token=YOUR_TOKEN))."
super().__init__(self.message)
# Path: re_gpt/errors.py
class UnexpectedResponseError(Exception):
def __init__(self, original_exception, server_response):
self.original_exception = original_exception
self.server_response = server_response
self.message = f"An unexpected error occurred. Error message: {self.original_exception}.\nThis is what the server returned: {self.server_response}."
super().__init__(self.message)
# Path: re_gpt/errors.py
class InvalidModelName(Exception):
def __init__(self, model, avalible_models):
self.model = model
self.avalible_models = avalible_models
self.message = f'"{model}" is not a valid model. Avalible models: {[model for model in avalible_models]}'
super().__init__(self.message)
# Path: re_gpt/utils.py
async def async_get_binary_path(session):
if binary_path is None:
return None
if not os.path.exists(funcaptcha_bin_folder_path) or not os.path.isdir(
funcaptcha_bin_folder_path
):
os.mkdir(funcaptcha_bin_folder_path)
if os.path.isfile(binary_path):
try:
local_binary_hash = calculate_file_md5(binary_path)
response = await session.get(latest_release_url)
json_data = response.json()
for line in json_data["body"].splitlines():
if line.startswith(current_os):
latest_binary_hash = line.split("=")[-1]
break
if local_binary_hash != latest_binary_hash:
file_url = get_file_url(json_data)
await async_download_binary(session, binary_path, file_url)
except:
return binary_path
else:
response = await session.get(latest_release_url)
json_data = response.json()
file_url = get_file_url(json_data)
await async_download_binary(session, binary_path, file_url)
return binary_path
# Path: re_gpt/utils.py
def get_model_slug(chat):
for _, message in chat.get("mapping", {}).items():
if "message" in message:
role = message["message"]["author"]["role"]
if role == "assistant":
return message["message"]["metadata"]["model_slug"]
# Path: re_gpt/async_chatgpt.py
import asyncio
import ctypes
import inspect
import json
import uuid
from typing import AsyncGenerator, Callable, Optional
from curl_cffi.requests import AsyncSession
from .errors import (
BackendError,
InvalidSessionToken,
RetryError,
TokenNotProvided,
UnexpectedResponseError,
InvalidModelName,
)
from .utils import async_get_binary_path, get_model_slug
# Constants
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
CHATGPT_API = "https://chat.openai.com/backend-api/{}"
BACKUP_ARKOSE_TOKEN_GENERATOR = "https://arkose-token-generator.zaieem.repl.co/token"
MODELS = {
"gpt-4": {"slug": "gpt-4", "needs_arkose_token": True},
"gpt-3.5": {"slug": "text-davinci-002-render-sha", "needs_arkose_token": False},
}
class AsyncConversation:
def __init__(self, chatgpt, conversation_id=None, model=None):
self.chatgpt = chatgpt
self.conversation_id = conversation_id
self.parent_id = None
self.model = model
async def fetch_chat(self) -> dict:
"""
Fetches the chat of the conversation from the API.
Returns:
dict: The JSON response from the API containing the chat if the conversation_id is not none, else returns an empty dict.
Raises:
UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format
"""
if not self.conversation_id:
return {}
url = CHATGPT_API.format(f"conversation/{self.conversation_id}")
response = await self.chatgpt.session.get(
url=url, headers=self.chatgpt.build_request_headers()
)
error = None
try:
chat = response.json()
self.parent_id = list(chat.get("mapping", {}))[-1]
model_slug = get_model_slug(chat)
self.model = [
key for key, value in MODELS.items() if value["slug"] == model_slug
][0]
except Exception as e:
error = e
if error is not None:
| raise UnexpectedResponseError(error, response.text) |
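fetch_chat captures any parsing failure and re-raises it wrapped together with the raw server payload. A stripped-down version of that pattern, with a stand-in exception class and json in place of the HTTP client:

import json

class UnexpectedResponse(Exception):  # stand-in for UnexpectedResponseError
    def __init__(self, original, payload):
        super().__init__(f"{original!r}; server returned: {payload!r}")

def parse_chat(raw_text):
    error = None
    try:
        chat = json.loads(raw_text)
        last_node = list(chat.get("mapping", {}))[-1]
    except Exception as e:
        error = e
    if error is not None:
        raise UnexpectedResponse(error, raw_text)
    return chat, last_node

print(parse_chat('{"mapping": {"a": {}, "b": {}}}')[1])  # b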
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: qualabs/video-headline
# Path: api/serializers/bills.py
class MinBillSerializer(serializers.ModelSerializer):
plan = serializers.CharField(source='plan.name')
class Meta:
model = Bill
fields = (
'id',
'plan',
'date'
)
# Path: api/serializers/bills.py
class BillSerializer(serializers.ModelSerializer):
plan = PlanSerializer()
extras = serializers.JSONField()
class Meta:
model = Bill
fields = (
'id',
'organization',
'plan',
'date',
'last_modified',
'video_transcoding',
'audio_transcoding',
'storage',
'data_transfer',
'extras'
)
# Path: organization/models/bill.py
class Bill(models.Model):
organization = models.ForeignKey(Organization,
models.CASCADE,
related_name='bills',
verbose_name='Organization')
plan = models.ForeignKey(Plan,
models.PROTECT,
null=True,
default=None,
related_name='bills',
verbose_name='Plan')
date = models.DateField(verbose_name='Creation Date')
last_modified = models.DateTimeField(auto_now=True,
verbose_name='Updated Date')
video_transcoding = models.FloatField(default=0,
verbose_name='Video Transcoding Minutes')
audio_transcoding = models.FloatField(default=0,
verbose_name='Audio Transcoding Minutes')
storage = models.FloatField(default=0,
verbose_name='Storage (GB)')
data_transfer = models.FloatField(default=0,
verbose_name='Traffic (GB)')
extras = JSONField(blank=True,
default=dict,
verbose_name='Extra information')
def __str__(self):
date = self.date.strftime('%b-%Y')
return f'{self.organization.name} - {date}'
class Meta:
verbose_name = 'Usage Report'
verbose_name_plural = 'Usage Reports'
unique_together = ('date', 'organization')
def save(self, *args, **kwargs):
self.date = self.date.replace(day=1)
super(Bill, self).save(*args, **kwargs)
def is_current_bill(self):
today = timezone.now()
return (today.year == self.date.year) and (today.month == self.date.month)
# Path: test_utils.py
def create_user(username, password, organization):
user = Account.objects.create_user(
username=username,
password=password,
organization=organization,
email=f'{username}@admin.com'
)
return user
# Path: test_utils.py
def create_superuser(username, password, organization):
su = Account.objects.create_superuser(
username=username,
password=password,
organization=organization,
email='admin@admin.com'
)
return su
# Path: test_utils.py
def create_key(name, user):
key = APIKey.objects.create(
name=f'{name}',
account=user,
)
return key
# Path: test_utils.py
def create_organizations(name, org_quantity, bucket_name='', contact_email='', cf_id='',
cf_domain='', plan=None, config=None):
organizations = []
for number in range(1, org_quantity + 1):
org = Organization.objects.create(
name=f'{name} {number}',
bucket_name=bucket_name,
contact_email=contact_email,
cf_id=cf_id,
cf_domain=cf_domain,
plan=plan,
config=config if config else {}
)
organizations.append(org)
return organizations
# Path: test_utils.py
def create_plans(name, quantity, description='', storage=0, video_transcoding=0, audio_transcoding=0, data_transfer=0):
plans = []
for number in range(1, quantity + 1):
plan = Plan.objects.create(
name=f'{name} {number}',
description=description,
storage=storage,
video_transcoding=video_transcoding,
audio_transcoding=audio_transcoding,
data_transfer=data_transfer
)
plans.append(plan)
return plans
# Path: test_utils.py
def create_bill(organization, plan, month=date.today().replace(day=1), storage=0, video_transcoding=0,
data_transfer=0):
bill = Bill.objects.create(
organization=organization,
plan=plan,
date=month,
storage=storage,
video_transcoding=video_transcoding,
data_transfer=data_transfer
)
return bill
# Path: api/tests/bills.py
import logging
from datetime import date
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from unittest import mock
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from api.serializers import MinBillSerializer, BillSerializer
from organization.models import Bill
from test_utils import create_user, create_superuser, create_key, create_organizations, \
create_plans, create_bill
class BillTests(APITestCase):
@classmethod
def setUpClass(cls):
logging.disable(logging.WARNING)
cls.org1, cls.org2 = create_organizations('Organization', 2)
cls.user1 = create_user('user1', '12345678', cls.org1)
cls.user2 = create_user('user2', '12345678', cls.org2)
cls.su = create_superuser('admin', '12345678', cls.org1)
cls.key = create_key('key', cls.user1)
cls.plan1, cls.plan2 = create_plans('Plan', 2)
def setUp(self):
self.bill1 = create_bill(self.org1, self.plan1, date.today().replace(day=1))
self.bill2 = create_bill(self.org2, self.plan1, date.today().replace(day=1))
self.bill3 = create_bill(self.org1, self.plan1,
date.today().replace(day=1) - relativedelta(months=1))
def tearDown(self):
| Bill.objects.all().delete() |
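The setUp above builds one bill for the current month and one for the previous month; the date arithmetic it relies on stands alone as:

from datetime import date
from dateutil.relativedelta import relativedelta

# First day of the current month and first day of the previous month.
first_of_month = date.today().replace(day=1)
first_of_prev_month = first_of_month - relativedelta(months=1)
print(first_of_month, first_of_prev_month)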
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: LAION-AI/Text-to-speech
# Path: modules/common.py
class Base:
MODEL_CHOICES = {}
def __init__(
self,
model_choice: str,
sampling_rate: int = 16000,
padding: Union[bool, str] = True,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
max_audio_len: int = 5,
**kwargs,
) -> None:
self.model_choice = model_choice.lower()
assert (
self.model_choice in self.MODEL_CHOICES
), f"Unrecognized model choice {self.model_choice}"
model = self.MODEL_CHOICES[self.model_choice]
if isinstance(model, dict):
self.model = {}
for key, value in model.items():
if key in ["target"]:
continue
self.model[key] = value(**kwargs)
elif isinstance(model, partial):
self.model = model(**kwargs)
else:
raise NotImplementedError("Not sure how to handle this model choice")
self.sampling_rate = sampling_rate
self.padding = padding
self.max_length = max_length
self.pad_to_multiple_of = pad_to_multiple_of
self.max_audio_len = max_audio_len
self.__post__init__()
def __post__init__(self):
for key, value in self.MODEL_CHOICES.items():
if (
isinstance(value, dict)
and "target" in value
and isinstance(value["target"], str)
):
self.MODEL_CHOICES[key]["target"] = getattr(self, value["target"])
@abstractmethod
def predict(self, **kwargs):
self.model(**kwargs)
def __call__(
self, audio_path: str = None, audio: torch.Tensor = None, **kwargs
) -> Any:
assert exists(audio_path) or exists(
audio
), "Either audio_path or audio tensor is required"
if isinstance(self.model, dict):
prediction = self.MODEL_CHOICES[self.model_choice]["target"](
audio_path=audio_path, audio=audio, **kwargs
)
else:
prediction = self.predict(audio_path=audio_path, audio=audio, **kwargs)
return prediction
def save_to_file(self, audio, sr, save_dir, start_dur=None, stop_dur=None):
# Handling audio with more than 2 dimensions
if audio.ndim > 2:
print(f"Warning: Audio has {audio.ndim} dimensions, averaging over channels for simplicity.")
audio = torch.mean(audio, dim=-1)
if exists(start_dur):
start_sample = round(start_dur * sr)
audio = audio[start_sample:]
if exists(stop_dur):
stop_sample = round(stop_dur * sr)
audio = audio[:stop_sample]
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if audio.ndim == 1:
audio = audio.unsqueeze(0)
save_path = (
os.path.join(save_dir, f"{str(uuid4())}.wav")
if not os.path.splitext(save_dir)[-1]
else save_dir
)
audio_ops.save_audio(wav=audio, path=save_path, sr=sr)
return save_path
# Path: modules/audio_superres_utils.py
def load_audiosr(args):
return build_model(args.model_name, device=args.device)
# Path: config/conf.py
DIR_PATH = osp.dirname(osp.realpath(__file__))
ROOT_PATH = osp.abspath(osp.join(osp.dirname(__file__), ".." + osp.sep))
# Path: modules/audio_superres.py
from os import path as osp
from pathlib import Path
from audiosr import super_resolution
from functools import partial
from .common import Base
from modules.audio_superres_utils import load_audiosr
from voicefixer import VoiceFixer
from config import settings
import os
import argparse
cache_dir = osp.join(settings.CACHE_DIR, "weights", "enhancement")
class SuperResAudio(Base):
MODEL_CHOICES = {
"audiosr": {
"model": partial(
| load_audiosr, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Qualcomm-AI-research/geometric-algebra-transformer
# Path: gatr/primitives/bilinear.py
@lru_cache()
def _load_bilinear_basis(
kind: str, device=torch.device("cpu"), dtype=torch.float32
) -> torch.Tensor:
"""Loads basis elements for Pin-equivariant bilinear maps between multivectors.
This function is cached.
Parameters
----------
kind : {"gp", "outer"}
Filename of the basis file, assumed to be found in __file__ / data
device : torch.Device or str
Device
dtype : torch.Dtype
Data type
Returns
-------
basis : torch.Tensor with shape (num_basis_elements, 16, 16, 16)
Basis elements for bilinear equivariant maps between multivectors.
"""
# To avoid duplicate loading, base everything on float32 CPU version
if device not in [torch.device("cpu"), "cpu"] and dtype != torch.float32:
basis = _load_bilinear_basis(kind)
else:
filename = Path(__file__).parent.resolve() / "data" / _FILENAMES[kind]
sparse_basis = torch.load(filename).to(torch.float32)
# Convert to dense tensor
# The reason we do that is that einsum is not defined for sparse tensors
basis = sparse_basis.to_dense()
return basis.to(device=device, dtype=dtype)
# Path: gatr/primitives/linear.py
@lru_cache()
def _compute_reversal(device=torch.device("cpu"), dtype=torch.float32) -> torch.Tensor:
"""Constructs a matrix that computes multivector reversal.
Parameters
----------
device : torch.device
Device
dtype : torch.dtype
Dtype
Returns
-------
reversal_diag : torch.Tensor with shape (16,)
The diagonal of the reversal matrix, consisting of +1 and -1 entries.
"""
reversal_flat = torch.ones(16, device=device, dtype=dtype)
reversal_flat[5:15] = -1
return reversal_flat
# Path: gatr/primitives/linear.py
def grade_project(x: torch.Tensor) -> torch.Tensor:
"""Projects an input tensor to the individual grades.
The return value is a single tensor with a new grade dimension.
NOTE: this primitive is not used widely in our architectures.
Parameters
----------
x : torch.Tensor with shape (..., 16)
Input multivector.
Returns
-------
outputs : torch.Tensor with shape (..., 5, 16)
Output multivector. The second-to-last dimension indexes the grades.
"""
# Select kernel on correct device
basis = _compute_pin_equi_linear_basis(device=x.device, dtype=x.dtype, normalize=False)
# First five basis elements are grade projections
basis = basis[:5]
# Project to grades
projections = cached_einsum("g i j, ... j -> ... g i", basis, x)
return projections
# Path: gatr/utils/einsum.py
def cached_einsum(equation: str, *operands: torch.Tensor) -> torch.Tensor:
"""Computes einsum with a cached optimal contraction.
Inspired by upstream
https://github.com/pytorch/pytorch/blob/v1.13.0/torch/functional.py#L381.
"""
op_shape = tuple(op.shape for op in operands)
path = _get_cached_path_for_equation_and_shapes(equation=equation, op_shape=op_shape)
return custom_einsum(equation, *operands, path=path)
# Path: gatr/primitives/invariants.py
from functools import lru_cache
from gatr.primitives.bilinear import _load_bilinear_basis
from gatr.primitives.linear import _compute_reversal, grade_project
from gatr.utils.einsum import cached_einsum
import torch
import torch.linalg
# Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.
@lru_cache()
def compute_inner_product_mask(device=torch.device("cpu")) -> torch.Tensor:
"""Constructs a bool array for the inner product calculation.
The inner product of MVs is <~x y>_0, i.e. take the grade-0 component of the geometric
product of the reverse of x with y.
Both the scalar component of the GP, and the reversal matrix, are diagonal.
Their product is 0 for basis elements involving e0, and 1 elsewhere, i.e.
IP = [1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0]
for dim order '', 'e0', 'e1', 'e2', 'e3', 'e01', 'e02', 'e03', 'e12', 'e13', 'e23',
'e012', 'e013', 'e023', 'e123', 'e0123'
Parameters
----------
device : torch.device
Device
Returns
-------
ip_mask : torch.Tensor with shape (16,)
Inner product mask
"""
gp = _load_bilinear_basis("gp", device=device, dtype=torch.float32)
| inner_product_mask = torch.diag(gp[0]) * _compute_reversal(device=device, dtype=torch.float32) |
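The docstring above spells out the expected 16-entry mask explicitly; building it by hand is a cheap cross-check against compute_inner_product_mask (run on the same device). Only the listed values are taken from the docstring.

import torch

expected = torch.tensor(
    [1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0], dtype=torch.bool
)
print(expected.sum().item())  # 8 basis elements carry no e0 factor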
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: StanislavPetrovV/Wolfenstein-3D-Clone
# Path: game_objects/game_object.py
class GameObject:
def __init__(self, level_map, tex_id, x, z):
self.eng = level_map.eng
self.app = self.eng.app
self.tex_id = tex_id
#
self.pos = glm.vec3(x + H_WALL_SIZE, 0, z + H_WALL_SIZE) # center of the tile
self.rot = 0
self.scale = glm.vec3(1)
#
self.m_model: glm.mat4 = None
def get_model_matrix(self):
m_model = glm.translate(glm.mat4(), self.pos)
m_model = glm.rotate(m_model, self.rot, glm.vec3(0, 1, 0))
m_model = glm.scale(m_model, self.scale)
return m_model
# Path: meshes/quad_mesh.py
class QuadMesh:
def __init__(self, eng, shader_program):
self.eng = eng
self.ctx = eng.ctx
self.program = shader_program
self.vbo_format = '4f 2f'
self.vbo_attrs = ('in_position', 'in_uv')
self.vao = self.get_vao()
def get_vao(self):
vertex_data = self.get_vertex_data()
vbo = self.ctx.buffer(vertex_data)
vao = self.ctx.vertex_array(
self.program,
[
(vbo, self.vbo_format, *self.vbo_attrs)
],
skip_errors=True
)
return vao
def render(self):
self.vao.render()
def get_vertex_data(self):
vert_position = (
[-0.5, 0.0, 0.0, 1.0], [-0.5, 1.0, 0.0, 1.0],
[ 0.5, 1.0, 0.0, 1.0], [ 0.5, 0.0, 0.0, 1.0]
)
uv_coords = (
[1, 1], [1, 0], [0, 0], [0, 1]
)
vert_indices = [
0, 2, 1, 0, 3, 2
]
vert_data = []
for vert_index in vert_indices:
vert_data += vert_position[vert_index]
vert_data += uv_coords[vert_index]
vert_data = np.array(vert_data, dtype='float32')
return vert_data
# Path: game_objects/weapon.py
from game_objects.game_object import GameObject
from meshes.quad_mesh import QuadMesh
from settings import *
class Weapon:
def __init__(self, eng):
self.eng = eng
self.app = eng.app
# refer to the player
self.player = self.eng.player
self.weapon_id = self.player.weapon_id
self.player.weapon_instance = self
#
self.pos = WEAPON_POS
self.rot = 0
self.scale = glm.vec3(WEAPON_SCALE / ASPECT_RATIO, WEAPON_SCALE, 0)
| self.m_model = GameObject.get_model_matrix(self) |
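GameObject.get_model_matrix composes translate, rotate-about-Y and scale with PyGLM; the same composition stands alone as below, with arbitrary numbers.

import glm

pos = glm.vec3(2.5, 0.0, 3.5)
angle = glm.radians(90.0)
scale = glm.vec3(1.0, 1.0, 0.0)

m_model = glm.translate(glm.mat4(), pos)     # translate to the tile position
m_model = glm.rotate(m_model, angle, glm.vec3(0, 1, 0))  # yaw around Y
m_model = glm.scale(m_model, scale)          # flatten on Z, as the weapon quad does
print(m_model)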
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tomguluson92/cloth2tex
# Path: lib/mesh_sampling.py
def generate_transform_matrices(mesh, factors):
"""Generates len(factors) meshes, each of them is scaled by factors[i] and
computes the transformations between them.
Returns:
M: a set of meshes downsampled from mesh by a factor specified in factors.
A: Adjacency matrix for each of the meshes
D: Downsampling transforms between each of the meshes
U: Upsampling transforms between each of the meshes
"""
factors = map(lambda x: 1.0 / x, factors)
M, A, D = [], [], []
# M, A, D, U = [], [], [], []
A.append(get_vert_connectivity(mesh.v, mesh.f).tocoo())
M.append(mesh)
for i,factor in enumerate(factors):
ds_f, ds_D = qslim_decimator_transformer(M[-1], factor=factor)
D.append(ds_D.tocoo())
new_mesh_v = ds_D.dot(M[-1].v)
new_mesh = Mesh(v=new_mesh_v, f=ds_f)
M.append(new_mesh)
A.append(get_vert_connectivity(new_mesh.v, new_mesh.f).tocoo())
return M, A, D
# return M, A, D, U
# Path: lib/mesh_sampling.py
def generate_transform_matrices_coma(mesh, factors):
"""Generates len(factors) meshes, each of them is scaled by factors[i] and
computes the transformations between them.
Returns:
M: a set of meshes downsampled from mesh by a factor specified in factors.
A: Adjacency matrix for each of the meshes
D: csc_matrix Downsampling transforms between each of the meshes
U: Upsampling transforms between each of the meshes
F: a list of faces
"""
factors = map(lambda x: 1.0 / x, factors)
M, A, D, U, F = [], [], [], [], []
F.append(mesh.f) # F[0]
A.append(get_vert_connectivity(mesh.v, mesh.f).astype('float32')) # A[0]
M.append(mesh) # M[0]
for factor in factors:
ds_f, ds_D = qslim_decimator_transformer(M[-1], factor=factor)
D.append(ds_D.astype('float32'))
new_mesh_v = ds_D.dot(M[-1].v)
new_mesh = Mesh(v=new_mesh_v, f=ds_f)
F.append(new_mesh.f)
M.append(new_mesh)
A.append(
get_vert_connectivity(new_mesh.v, new_mesh.f).tocoo())
U.append(setup_deformation_transfer(M[-1], M[-2]).astype('float32'))
return M, A, D, U, F
# Path: lib/utils_dg.py
def col(A):
return A.reshape((-1, 1))
# Path: lib/utils_dg.py
def batch_rodrigues(axisang):
# This function is borrowed from https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py#L37
# axisang N x 3
axisang_norm = torch.norm(axisang + 1e-8, p=2, dim=1)
angle = torch.unsqueeze(axisang_norm, -1)
axisang_normalized = torch.div(axisang, angle)
angle = angle * 0.5
v_cos = torch.cos(angle)
v_sin = torch.sin(angle)
quat = torch.cat([v_cos, v_sin * axisang_normalized], dim=1)
rot_mat = quat2mat(quat)
# rot_mat = rot_mat.view(rot_mat.shape[0], 9)
return rot_mat
# Path: lib/deformation_graph.py
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.autograd.functional as F
import pickle
from scipy.spatial import KDTree
from psbody.mesh import Mesh
from .mesh_sampling import generate_transform_matrices, generate_transform_matrices_coma
from .utils_dg import col, batch_rodrigues
from pytorch3d.io import load_obj, load_objs_as_meshes, save_obj
# coding: UTF-8
"""
@date: 2023.02.21-28 week8-9
@func: deformation graph.
"""
eps = sys.float_info.epsilon # 2.220446049250313e-16
class DeformationGraph(nn.Module):
def __init__(self, vert_number=9648, radius=0.015, k=9, sampling_strategy='qslim'):
super().__init__()
self.radius = radius
self.k = k
self.max_neigh_num = 40
self.sampling_strategy = sampling_strategy
self.one_ring_neigh = []
self.nodes_idx = None
self.weights = None
self.influence_nodes_idx = []
self.dists = []
self.vert_number = vert_number
def construct_graph(self, category_name, vertices=None, faces=None):
transform_fp = "transform_{}.pkl".format(category_name)
if self.sampling_strategy == 'qslim':
m = Mesh(v=vertices, f=faces)
if os.path.exists(transform_fp):
with open(transform_fp, 'rb') as f:
tmp = pickle.load(f, encoding='latin1')
M, A, D = tmp['M'], tmp['A'], tmp['D']
else:
| M, A, D = generate_transform_matrices(m, [20, 20]) |
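construct_graph caches the expensive decimation result in a pickle file and reloads it on later runs. The generic shape of that pattern, with a stand-in for the expensive call:

import os
import pickle

def expensive_compute():  # stand-in for generate_transform_matrices
    return {"M": [1, 2, 3], "A": [[0, 1], [1, 0]], "D": [0.5]}

cache_fp = "transform_example.pkl"
if os.path.exists(cache_fp):
    with open(cache_fp, "rb") as f:
        tmp = pickle.load(f)
else:
    tmp = expensive_compute()
    with open(cache_fp, "wb") as f:
        pickle.dump(tmp, f)

M, A, D = tmp["M"], tmp["A"], tmp["D"]
print(M, A, D)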
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amazon-science/cceval
# Path: eval_utils.py
def postprocess_code_lines(prompt, completion, parser, lang):
try:
if lang in ["java", "csharp", "typescript"]:
return get_bracket_lang_statement(completion)
elif lang == "python":
return get_python_one_statement(prompt, completion, parser)
except Exception as e:
return completion
# Path: eval_utils.py
def extract_identifiers(source_code, lang):
# the main idea is to remove String from a source code
# then, tokenize the code to get all words and match with identifier regular expression
# check if it is a language specific keyword, it not, then it is an identifier
source_code_without_strings = re.sub(string_pattern, '', source_code)
_ids = [t for t in code_tokenizer.tokenize(source_code_without_strings) if is_identifier(t, lang)]
return _ids
# Path: eval_utils.py
def cal_edit_sim(references, hypotheses):
total = len(references)
edit_sim = 0.0
for pred, gt in zip(hypotheses, references):
pred = pred.strip()
gt = gt.strip()
edit_sim += fuzz.ratio(pred, gt)
return edit_sim / total
# Path: eval_utils.py
def remove_comments(code):
code = re.sub(r'#.*', '', code)
code = re.sub(r'//.*', '', code)
return code
# Path: eval_metric.py
import json
import torch.multiprocessing as mp
from functools import partial
from tqdm import tqdm
from tree_sitter import Language, Parser
from eval_utils import (
postprocess_code_lines,
extract_identifiers,
cal_edit_sim,
remove_comments
)
parser = None
def compute_id_match(pred_ids, target_ids):
pred_ids = list(set(pred_ids))
target_ids = list(set(target_ids))
tp = 0
fp = 0
fn = 0
for pid in pred_ids:
if pid in target_ids:
tp += 1
else:
fp += 1
for tid in target_ids:
if tid not in pred_ids:
fn += 1
return tp, fp, fn
def compute_edit_sim(samples):
refs, hyps = [], []
for s in samples:
refs.append(s["target"])
hyps.append(s["pred"])
return cal_edit_sim(refs, hyps)
def process_examples(lang, args):
sample, ex = args
global parser
prediction = postprocess_code_lines(ex["prompt"], sample["pred"], parser, lang)
prediction = remove_comments(prediction)
target = ex["groundtruth"]
target = remove_comments(target)
pred_lines = [l.strip() for l in prediction.split("\n") if l.strip()]
gt_lines = [l.strip() for l in target.split("\n") if l.strip()]
em_label = int(pred_lines == gt_lines)
| pred_ids = extract_identifiers(prediction, lang) |
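compute_id_match returns raw tp/fp/fn counts per sample; turning them into precision/recall/F1 is not shown in the snippet, so the conversion below only illustrates the usual formulas.

def prf(tp, fp, fn):
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

print(prf(tp=8, fp=2, fn=4))  # (0.8, 0.666..., 0.727...)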
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: uukuguy/multi_loras
# Path: multi_loras/extract_lora.py
def do_extract_lora(args):
# Load base model and tuned model
model_kwargs = prepare_model_kwargs(args)
base_model = load_model_and_init_lora(args, args.base_model_name_or_path, model_kwargs)
tuned_model = load_model_and_init_lora(args, args.tuned_model_name_or_path, model_kwargs)
bits = args.bits
num_base_lora_modules = get_lora_modules_count(base_model, bits)
num_tuned_lora_modules = get_lora_modules_count(tuned_model, bits)
assert num_base_lora_modules == num_tuned_lora_modules, f"{num_base_lora_modules=}, {num_tuned_lora_modules=}"
pbar = tqdm(zip(_iter_lora(base_model, bits), _iter_lora(tuned_model, bits)),
total=num_base_lora_modules, ncols=120, desc="SVD")
rank = args.lora_r
clamp_quantile = args.clamp_quantile
device = base_model.device
dtype = base_model.dtype
for (name_base, lora_base), (name_tuned, lora_tune) in pbar:
assert name_base == name_tuned, f"name_base={name_base} != name_tuned={name_tuned}"
residual = lora_tune.weight.data - lora_base.weight.data
pbar.set_postfix({"layer": name_base.replace("base_model.model.", ""), "shape": residual.shape})
# SVD on residual
U, Vh = svd_distill(residual, rank=rank, clamp_quantile=clamp_quantile)
assert lora_base.lora_A.default.weight.shape == Vh.shape, f"{lora_base=}"
assert lora_base.lora_B.default.weight.shape == U.shape, f"{lora_base=}"
lora_base.lora_A.default.weight.data = Vh.to(device=device, dtype=dtype)
lora_base.lora_B.default.weight.data = U.to(device=device, dtype=dtype)
# Save the distilled model
print(f"Saving peft model to {args.save_path} ...")
base_model.save_pretrained(args.save_path)
print(f"Save done.")
# Path: multi_loras/merge_peft_adapters.py
def merge_peft_adapters(base_model_name_or_path, peft_model_path, merged_model_name_or_path=None, push_to_hub=False):
def main():
# Path: multi_loras/dare.py
def do_dare(args):
"""
This function is used to do drop and rescale for the tuned model
"""
print(f"Loading base model from {args.base_model_name_or_path} ...")
base_model = AutoModelForCausalLM.from_pretrained(
args.base_model_name_or_path, device_map=args.device_map, trust_remote_code=True
).half()
print(f"Loading tuned model from {args.tuned_model_name_or_path} ...")
tuned_model = AutoModelForCausalLM.from_pretrained(
args.tuned_model_name_or_path,
device_map=args.device_map,
trust_remote_code=True,
).half()
tokenizer = AutoTokenizer.from_pretrained(args.tuned_model_name_or_path, trust_remote_code=True)
dare_kwargs = {
"weight_mask_rate": args.dare_weight_mask_rate,
"use_weight_rescale": args.dare_use_weight_rescale,
"mask_strategy": args.dare_mask_strategy,
"scaling_coefficient": args.dare_scaling_coefficient,
}
print(
f"Do drop and rescale with {dare_kwargs=} with {args.tuned_model_name_or_path} ..."
)
model_weights = drop_and_rescale_model(
tuned_model=tuned_model,
base_model=base_model,
**dare_kwargs,
)
copy_params_to_model(model_weights, base_model)
print(f"Saving model to {args.save_path} ...")
tokenizer.save_pretrained(args.save_path)
base_model.save_pretrained(args.save_path)
print(f"Saved model to {args.save_path}")
# Path: multi_loras/delta_weights.py
def do_delta_weights(args):
"""
Compute the delta weights between two models and save the delta weights to a file
"""
base_model, tuned_model = load_models(args)
delta_weights = DeltaWeights(base_model=base_model, tuned_model=tuned_model)
print(f"Saving delta weights to {args.save_path} ...")
torch.save(delta_weights.params_dict, args.save_path)
print(f"Succesfully saved delta weights to {args.save_path}")
# Path: multi_loras/delta_weights.py
def do_orthogonal(args):
base_model, tuned_model = load_models(args)
print(f"Calculating orthogonal component ...")
base_params = get_model_params(base_model)
tuned_params = get_model_params(tuned_model)
orthogonal_params = {}
for key, tuned_weights in tqdm(tuned_params.items(), ncols=100, desc=f"Orthogonal"):
base_weights = base_params[key]
tuned_weights = tuned_weights.detach().cpu().numpy()
base_weights = base_weights.detach().cpu().numpy()
orthogonal_weights =calculate_orthogonal_component(base_weights, tuned_weights, scaling_factor=args.orthogonal_scaling_factor)
orthogonal_params[key] = torch.tensor(orthogonal_weights)
print(f"Combining orthogonal component with pretrained model ...")
delta_weights = DeltaWeights(params_dict=orthogonal_params)
new_model_weights = delta_weights.combine_with_pretrained_model(base_model)
copy_params_to_model(new_model_weights, base_model)
print(f"Saving model to {args.save_path} ...")
tokenizer = AutoTokenizer.from_pretrained(args.tuned_model_name_or_path, trust_remote_code=True)
tokenizer.save_pretrained(args.save_path)
base_model.save_pretrained(args.save_path)
print(f"Saved model to {args.save_path}")
# delta_weights = DeltaWeights(base_model=base_model, tuned_model=tuned_model)
# print(f"Saving delta weights layer params to {args.save_path} ...")
# delta_weights.save(args.save_path)
# print(f"Succesfully saved delta weights layer params to {args.save_path}")
# Path: multi_loras/__main__.py
from .extract_lora import do_extract_lora
from .merge_peft_adapters import do_merge_lora
from .dare import do_dare
from .delta_weights import do_delta_weights, do_orthogonal
from argparse import ArgumentParser
#!/usr/bin/env python
cmd_functions = {
"extract_lora": do_extract_lora,
"merge_lora": do_merge_lora,
"drop_and_rescale": do_dare,
| "delta_weights": do_delta_weights, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: myshell-ai/AIlice
# Path: ailice/common/AConfig.py
class AConfig():
def __init__(self):
def Initialize(self, needOpenaiGPTKey = False):
def Load(self, configFile: str) -> dict:
def Store(self, configFile: str):
# Path: ailice/prompts/ARegex.py
def GenerateRE4FunctionCalling(signature: str, faultTolerance: bool = False) -> str:
#signature: "FUNC<!|ARG1: ARG1_TYPE, ARG2: ARG2_TYPE...|!> -> RETURN_TYPE"
pattern = r"(\w+)<!\|((?:\w+[ ]*:[ ]*[\w, ]+)*)\|!>(?:[ ]*->[ ]*)(\w+)"
matches = re.search(pattern, signature)
if matches is None:
print("signature invalid. exit. ", signature)
exit()
funcName, args, retType = matches[1], matches[2], matches[3]
pattern = r"(\w+)[ ]*:[ ]*(\w+)"
typePairs = re.findall(pattern, args)
reMap = {k: v for k,v in ARegexMap.items()}
reMap["str"] = r"(?:.*(?=\|!>))" if faultTolerance else ARegexMap['str']
patternArgs = '[ ]*,[ ]*'.join([f"(?:({arg}|\"{arg}\"|\'{arg}\')[ ]*[:=][ ]*)?(?P<{arg}>({reMap[tp]}))" for arg,tp in typePairs])
return rf"!{funcName}<!\|[ ]*{patternArgs}[ ]*\|!>"
# Path: ailice/prompts/ATools.py
def ConstructOptPrompt(func, low:int, high: int, maxLen: int) -> str:
prompt = None
n = None
while low <= high:
mid = (low + high) // 2
p, length = func(mid)
if length < maxLen:
n = mid
prompt = p
low = mid + 1
else:
high = mid - 1
return prompt, n
# Path: ailice/prompts/APromptSearchEngine.py
from importlib.resources import read_text
from ailice.common.AConfig import config
from ailice.prompts.ARegex import GenerateRE4FunctionCalling
from ailice.prompts.ATools import ConstructOptPrompt
class APromptSearchEngine():
PROMPT_NAME = "search-engine"
def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):
self.processor = processor
self.conversations = conversations
self.formatter = formatter
self.outputCB = outputCB
self.prompt0 = read_text("ailice.prompts", "prompt_searchengine.txt")
self.PATTERNS = {"QUERY": [{"re": GenerateRE4FunctionCalling("QUERY<!|request: str|!> -> str", faultTolerance = True), "isEntry": True}],
"ARXIV": [{"re": GenerateRE4FunctionCalling("ARXIV<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
"SCROLLDOWNARXIV": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNARXIV<!||!> -> str", faultTolerance = True), "isEntry": True}],
"GOOGLE": [{"re": GenerateRE4FunctionCalling("GOOGLE<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
"SCROLLDOWNGOOGLE": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNGOOGLE<!||!> -> str", faultTolerance = True), "isEntry": True}],
"DUCKDUCKGO": [{"re": GenerateRE4FunctionCalling("DUCKDUCKGO<!|keywords: str|!> -> str", faultTolerance = True), "isEntry": True}],
"SCROLLDOWNDUCKDUCKGO": [{"re": GenerateRE4FunctionCalling("SCROLLDOWNDUCKDUCKGO<!||!> -> str", faultTolerance = True), "isEntry": True}],
"BROWSE": [{"re": GenerateRE4FunctionCalling("BROWSE<!|url: str|!> -> str", faultTolerance = True), "isEntry": True}],
"SCROLLDOWN": [{"re": GenerateRE4FunctionCalling("SCROLLDOWN<!||!> -> str"), "isEntry": True}],
"RESPOND": [{"re": GenerateRE4FunctionCalling("RESPOND<!|message: str|!> -> None", faultTolerance = True), "isEntry": True}]}
self.ACTIONS= {}
return
def Reset(self):
return
def GetPatterns(self):
return self.PATTERNS
def GetActions(self):
return self.ACTIONS
def ParameterizedBuildPrompt(self, n: int):
prompt = f"""
{self.prompt0}
End of general instructions.
"""
#prompt += "Conversations:"
ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))
return ret, self.formatter.Len(ret)
def BuildPrompt(self):
| prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio)) |
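ConstructOptPrompt (defined above) binary-searches for the largest n whose rendered prompt stays under maxLen. A toy call, assuming that function is importable, where the "prompt" is simply n characters long:

def fake_build(n):  # stand-in for ParameterizedBuildPrompt
    prompt = "x" * n
    return prompt, len(prompt)

prompt, n = ConstructOptPrompt(fake_build, low=1, high=100, maxLen=50)
print(n, len(prompt))  # 49 49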
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Agora-X/Bing-Chat-API
# Path: src/bing_chat/conversation_style.py
CONVERSATION_STYLE_TYPE = Optional[
Union[ConversationStyle, Literal["creative", "balanced", "precise"]]
]
# Path: src/bing_chat/conversation_style.py
class ConversationStyle(Enum):
creative = [
"nlu_direct_response_filter",
"deepleo",
"disable_emoji_spoken_text",
"responsible_ai_policy_235",
"enablemm",
"h3imaginative",
"objopinion",
"dsblhlthcrd",
"dv3sugg",
"autosave",
"clgalileo",
"gencontentv3",
]
balanced = [
"nlu_direct_response_filter",
"deepleo",
"disable_emoji_spoken_text",
"responsible_ai_policy_235",
"enablemm",
"galileo",
"saharagenconv5",
"objopinion",
"dsblhlthcrd",
"dv3sugg",
"autosave",
]
precise = [
"nlu_direct_response_filter",
"deepleo",
"disable_emoji_spoken_text",
"responsible_ai_policy_235",
"enablemm",
"h3precise",
"objopinion",
"dsblhlthcrd",
"dv3sugg",
"autosave",
"clgalileo",
"gencontentv3",
]
# Path: src/bing_chat/utilities.py
def get_location_hint_from_locale(locale: str) -> Union[dict, None]:
locale = locale.lower()
if locale == "en-gb":
hint = LocationHint.UK.value
elif locale == "en-ie":
hint = LocationHint.EU.value
elif locale == "zh-cn":
hint = LocationHint.CHINA.value
else:
hint = LocationHint.USA.value
return hint.get("LocationHint")
# Path: src/bing_chat/utilities.py
def get_ran_hex(length: int = 32) -> str:
return "".join(random.choice("0123456789abcdef") for _ in range(length))
# Path: src/bing_chat/utilities.py
def guess_locale() -> str:
if sys.platform.startswith("win"):
return "en-us"
loc, _ = locale.getlocale()
return loc.replace("_", "-") if loc else "en-us"
# Path: src/bing_chat/request.py
import uuid
from datetime import datetime
from typing import Union
from .conversation_style import CONVERSATION_STYLE_TYPE
from .conversation_style import ConversationStyle
from .utilities import get_location_hint_from_locale
from .utilities import get_ran_hex
from .utilities import guess_locale
class ChatHubRequest:
def __init__(
self,
conversation_signature: str,
encrypted_conversation_signature: str,
client_id: str,
conversation_id: str,
invocation_id: int = 3,
) -> None:
self.struct: dict = {}
self.client_id: str = client_id
self.conversation_id: str = conversation_id
self.conversation_signature: str = conversation_signature
self.encrypted_conversation_signature: str = encrypted_conversation_signature
self.invocation_id: int = invocation_id
def update(
self,
prompt: str,
conversation_style: CONVERSATION_STYLE_TYPE,
webpage_context: Union[str, None] = None,
search_result: bool = False,
| locale: str = guess_locale(), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: f0uriest/interpax
# Path: interpax/utils.py
def errorif(cond, err=ValueError, msg=""):
"""Raise an error if condition is met.
Similar to assert but allows wider range of Error types, rather than
just AssertionError.
"""
if cond:
raise err(msg)
# Path: interpax/utils.py
def isbool(x):
"""Check if something is boolean or ndarray of bool type."""
return isinstance(x, bool) or (hasattr(x, "dtype") and (x.dtype == bool))
# Path: interpax/_spline.py
from collections import OrderedDict
from functools import partial
from typing import Union
from jax import jit
from .utils import errorif, isbool
import equinox as eqx
import jax
import jax.numpy as jnp
import numpy as np
"""Functions for interpolating splines that are JAX differentiable."""
CUBIC_METHODS = ("cubic", "cubic2", "cardinal", "catmull-rom")
OTHER_METHODS = ("nearest", "linear")
METHODS_1D = CUBIC_METHODS + OTHER_METHODS + ("monotonic", "monotonic-0")
METHODS_2D = CUBIC_METHODS + OTHER_METHODS
METHODS_3D = CUBIC_METHODS + OTHER_METHODS
class Interpolator1D(eqx.Module):
"""Convenience class for representing a 1D interpolated function.
Parameters
----------
x : ndarray, shape(Nx,)
coordinates of known function values ("knots")
f : ndarray, shape(Nx,...)
function values to interpolate
method : str
method of interpolation
- ``'nearest'``: nearest neighbor interpolation
- ``'linear'``: linear interpolation
- ``'cubic'``: C1 cubic splines (aka local splines)
- ``'cubic2'``: C2 cubic splines (aka natural splines)
- ``'catmull-rom'``: C1 cubic centripetal "tension" splines
- ``'cardinal'``: C1 cubic general tension splines. If used, can also pass
keyword parameter ``c`` in float[0,1] to specify tension
- ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the
data, and will not introduce new extrema in the interpolated points
- ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at
both endpoints
extrap : bool, float, array-like
whether to extrapolate values beyond knots (True) or return nan (False),
or a specified value to return for query points outside the bounds. Can
also be passed as a 2 element array or tuple to specify different conditions
for xq<x[0] and x[-1]<xq
period : float > 0, None
periodicity of the function. If given, function is assumed to be periodic
on the interval [0,period]. None denotes no periodicity
Notes
-----
This class is registered as a PyTree in JAX (it is actually an equinox.Module)
so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)
"""
x: jax.Array
f: jax.Array
derivs: dict
method: str
extrap: Union[bool, float, tuple]
period: Union[None, float]
axis: int
def __init__(
self,
x: jax.Array,
f: jax.Array,
method: str = "cubic",
extrap: Union[bool, float, tuple] = False,
period: Union[None, float] = None,
**kwargs,
):
x, f = map(jnp.asarray, (x, f))
axis = kwargs.get("axis", 0)
fx = kwargs.pop("fx", None)
| errorif( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aszc-dev/ComfyUI-CoreMLSuite
# Path: coreml_suite/config.py
def get_model_config(model_version: ModelVersion):
unet_config = convert_config(config_map[model_version])
config = supported_models_base.BASE(unet_config)
config.latent_format = latent_format_map[model_version]()
return config
# Path: coreml_suite/config.py
class ModelVersion(Enum):
SD15 = "sd15"
SDXL = "sdxl"
SDXL_REFINER = "sdxl_refiner"
LCM = "lcm"
# Path: coreml_suite/controlnet.py
def extract_residual_kwargs(expected_inputs, control):
if "additional_residual_0" not in expected_inputs.keys():
return {}
if control is None:
return no_control(expected_inputs)
residual_kwargs = {
"additional_residual_{}".format(i): r.cpu().numpy().astype(np.float16)
for i, r in enumerate(chain(control["output"], control["middle"]))
}
return residual_kwargs
# Path: coreml_suite/controlnet.py
def chunk_control(cn, target_size):
if cn is None:
return [None] * target_size
num_chunks = ceil(cn["output"][0].shape[0] / target_size)
out = [{"output": [], "middle": []} for _ in range(num_chunks)]
for k, v in cn.items():
for i, x in enumerate(v):
chunks = chunk_batch(x, (target_size, *x.shape[1:]))
for j, chunk in enumerate(chunks):
out[j][k].append(chunk)
return out
# Path: coreml_suite/latents.py
def chunk_batch(input_tensor, target_shape):
if input_tensor.shape == target_shape:
return [input_tensor]
batch_size = input_tensor.shape[0]
target_batch_size = target_shape[0]
num_chunks = batch_size // target_batch_size
if num_chunks == 0:
padding = torch.zeros(target_batch_size - batch_size, *target_shape[1:]).to(
input_tensor.device
)
return [torch.cat((input_tensor, padding), dim=0)]
mod = batch_size % target_batch_size
if mod != 0:
chunks = list(torch.chunk(input_tensor[:-mod], num_chunks))
padding = torch.zeros(target_batch_size - mod, *target_shape[1:]).to(
input_tensor.device
)
padded = torch.cat((input_tensor[-mod:], padding), dim=0)
chunks.append(padded)
return chunks
chunks = list(torch.chunk(input_tensor, num_chunks))
return chunks
# Path: coreml_suite/latents.py
def merge_chunks(chunks, orig_shape):
merged = torch.cat(chunks, dim=0)
if merged.shape == orig_shape:
return merged
return merged[: orig_shape[0]]
# Path: coreml_suite/lcm/utils.py
def is_lcm(coreml_model):
return "timestep_cond" in coreml_model.expected_inputs
# Path: coreml_suite/logger.py
# Path: coreml_suite/models.py
import numpy as np
import torch
from comfy import model_base
from comfy.model_management import get_torch_device
from comfy.model_patcher import ModelPatcher
from coreml_suite.config import get_model_config, ModelVersion
from coreml_suite.controlnet import extract_residual_kwargs, chunk_control
from coreml_suite.latents import chunk_batch, merge_chunks
from coreml_suite.lcm.utils import is_lcm
from coreml_suite.logger import logger
class CoreMLModelWrapper:
def __init__(self, coreml_model):
self.coreml_model = coreml_model
self.dtype = torch.float16
def __call__(self, x, t, context, control, transformer_options=None, **kwargs):
inputs = CoreMLInputs(x, t, context, control, **kwargs)
input_list = inputs.chunks(self.expected_inputs)
chunked_out = [
self.get_torch_outputs(
self.coreml_model(**input_kwargs.coreml_kwargs(self.expected_inputs)),
x.device,
)
for input_kwargs in input_list
]
merged_out = merge_chunks(chunked_out, x.shape)
return merged_out
@staticmethod
def get_torch_outputs(model_output, device):
return torch.from_numpy(model_output["noise_pred"]).to(device)
@property
def expected_inputs(self):
return self.coreml_model.expected_inputs
@property
def is_lcm(self):
return is_lcm(self.coreml_model)
@property
def is_sdxl_base(self):
return is_sdxl_base(self.coreml_model)
@property
def is_sdxl_refiner(self):
return is_sdxl_refiner(self.coreml_model)
@property
def config(self):
if self.is_sdxl_base:
return get_model_config(ModelVersion.SDXL)
if self.is_sdxl_refiner:
return get_model_config(ModelVersion.SDXL_REFINER)
return get_model_config(ModelVersion.SD15)
class CoreMLModelWrapperLCM(CoreMLModelWrapper):
def __init__(self, coreml_model):
super().__init__(coreml_model)
self.config = None
class CoreMLInputs:
def __init__(self, x, t, context, control, **kwargs):
self.x = x
self.t = t
self.context = context
self.control = control
self.time_ids = kwargs.get("time_ids")
self.text_embeds = kwargs.get("text_embeds")
self.ts_cond = kwargs.get("timestep_cond")
def coreml_kwargs(self, expected_inputs):
sample = self.x.cpu().numpy().astype(np.float16)
context = self.context.cpu().numpy().astype(np.float16)
context = context.transpose(0, 2, 1)[:, :, None, :]
t = self.t.cpu().numpy().astype(np.float16)
model_input_kwargs = {
"sample": sample,
"encoder_hidden_states": context,
"timestep": t,
}
| residual_kwargs = extract_residual_kwargs(expected_inputs, self.control) |
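chunk_batch and merge_chunks (imported above from coreml_suite.latents) split a batch into fixed-size, zero-padded chunks for the Core ML model and then trim the padding off again. A small round-trip check, assuming that package is importable and PyTorch is installed:

import torch
from coreml_suite.latents import chunk_batch, merge_chunks

x = torch.arange(15, dtype=torch.float32).reshape(5, 3)   # batch of 5
chunks = chunk_batch(x, (2, 3))          # 3 chunks of shape (2, 3); the last is zero-padded
restored = merge_chunks(chunks, x.shape)
assert len(chunks) == 3 and all(c.shape == (2, 3) for c in chunks)
assert torch.equal(restored, x)          # padding removed, original batch recovered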
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aikunyi/FreTS
# Path: utils/masking.py
class TriangularCausalMask():
def __init__(self, B, L, device="cpu"):
mask_shape = [B, 1, L, L]
with torch.no_grad():
self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)
@property
def mask(self):
return self._mask
# Path: utils/masking.py
class ProbMask():
def __init__(self, B, H, L, index, scores, device="cpu"):
_mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)
_mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])
indicator = _mask_ex[torch.arange(B)[:, None, None],
torch.arange(H)[None, :, None],
index, :].to(device)
self._mask = indicator.view(scores.shape).to(device)
@property
def mask(self):
return self._mask
# Path: layers/SelfAttention_Family.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import math
import os
from math import sqrt
from utils.masking import TriangularCausalMask, ProbMask
class FullAttention(nn.Module):
def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
super(FullAttention, self).__init__()
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
def forward(self, queries, keys, values, attn_mask):
B, L, H, E = queries.shape
_, S, _, D = values.shape
scale = self.scale or 1. / sqrt(E)
scores = torch.einsum("blhe,bshe->bhls", queries, keys)
if self.mask_flag:
if attn_mask is None:
attn_mask = TriangularCausalMask(B, L, device=queries.device)
scores.masked_fill_(attn_mask.mask, -np.inf)
A = self.dropout(torch.softmax(scale * scores, dim=-1))
V = torch.einsum("bhls,bshd->blhd", A, values)
if self.output_attention:
return (V.contiguous(), A)
else:
return (V.contiguous(), None)
class ProbAttention(nn.Module):
def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
super(ProbAttention, self).__init__()
self.factor = factor
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
def _prob_QK(self, Q, K, sample_k, n_top): # n_top: c*ln(L_q)
# Q [B, H, L, D]
B, H, L_K, E = K.shape
_, _, L_Q, _ = Q.shape
# calculate the sampled Q_K
K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)
index_sample = torch.randint(L_K, (L_Q, sample_k)) # real U = U_part(factor*ln(L_k))*L_q
K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :]
Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()
        # find the Top_k query with sparsity measurement
M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)
M_top = M.topk(n_top, sorted=False)[1]
# use the reduced Q to calculate Q_K
Q_reduce = Q[torch.arange(B)[:, None, None],
torch.arange(H)[None, :, None],
M_top, :] # factor*ln(L_q)
Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # factor*ln(L_q)*L_k
return Q_K, M_top
def _get_initial_context(self, V, L_Q):
B, H, L_V, D = V.shape
if not self.mask_flag:
# V_sum = V.sum(dim=-2)
V_sum = V.mean(dim=-2)
contex = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone()
else: # use mask
assert (L_Q == L_V) # requires that L_Q == L_V, i.e. for self-attention only
contex = V.cumsum(dim=-2)
return contex
def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
B, H, L_V, D = V.shape
if self.mask_flag:
| attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device) |
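The sparsity measure M computed in _prob_QK above is simply max-minus-mean of each query's sampled attention scores, so a query with a flat score distribution gets M close to zero while a sharply peaked one gets a large M and is kept among the top queries. A tiny illustration, assuming PyTorch is available:

import torch

scores_flat = torch.tensor([1.0, 1.0, 1.0, 1.0])   # uninformative query
scores_peak = torch.tensor([9.0, 0.0, 0.0, 0.0])   # strongly peaked query
L_K = scores_flat.shape[-1]

M_flat = scores_flat.max(-1)[0] - scores_flat.sum(-1) / L_K   # tensor(0.)
M_peak = scores_peak.max(-1)[0] - scores_peak.sum(-1) / L_K   # tensor(6.7500)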
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lightly-ai/labelformat
# Path: src/labelformat/formats/yolov8.py
class YOLOv8ObjectDetectionInput(_YOLOv8BaseInput, ObjectDetectionInput):
def get_labels(self) -> Iterable[ImageObjectDetection]:
category_id_to_category = {
category.id: category for category in self.get_categories()
}
labels_dir = self._labels_dir()
for image in self.get_images():
label_path = (labels_dir / image.filename).with_suffix(".txt")
            if not label_path.exists():
                logger.warning(
                    f"Label file '{label_path}' for image '{image.filename}' does not exist."
                )
                continue
with label_path.open() as file:
label_data = [line.split() for line in file.readlines()]
objects = []
for category_id, rcx, rcy, rw, rh in label_data:
cx = float(rcx) * image.width
cy = float(rcy) * image.height
w = float(rw) * image.width
h = float(rh) * image.height
objects.append(
SingleObjectDetection(
category=category_id_to_category[int(category_id)],
box=BoundingBox.from_format(
bbox=[cx, cy, w, h],
format=BoundingBoxFormat.CXCYWH,
),
)
)
yield ImageObjectDetection(
image=image,
objects=objects,
)
# Path: src/labelformat/formats/yolov8.py
class YOLOv8ObjectDetectionOutput(_YOLOv8BaseOutput, ObjectDetectionOutput):
def save(self, label_input: ObjectDetectionInput) -> None:
# Write config file.
self._output_file.parent.mkdir(parents=True, exist_ok=True)
_save_dataset_yaml(
output_file=self._output_file,
output_split=self._output_split,
categories=list(label_input.get_categories()),
)
# Write label files.
labels_dir = self._output_file.parent / "labels"
for label in label_input.get_labels():
label_path = (labels_dir / label.image.filename).with_suffix(".txt")
label_path.parent.mkdir(parents=True, exist_ok=True)
with label_path.open("w") as file:
for obj in label.objects:
cx, cy, w, h = obj.box.to_format(format=BoundingBoxFormat.CXCYWH)
rcx = cx / label.image.width
rcy = cy / label.image.height
rw = w / label.image.width
rh = h / label.image.height
file.write(f"{obj.category.id} {rcx} {rcy} {rw} {rh}\n")
# Path: src/labelformat/formats/yolov6.py
from labelformat.cli.registry import Task, cli_register
from .yolov8 import YOLOv8ObjectDetectionInput, YOLOv8ObjectDetectionOutput
"""
YOLOv6 format follows the same specs as YOLOv8.
"""
@cli_register(format="yolov6", task=Task.OBJECT_DETECTION)
class YOLOv6ObjectDetectionInput(YOLOv8ObjectDetectionInput):
pass
@cli_register(format="yolov6", task=Task.OBJECT_DETECTION)
| class YOLOv6ObjectDetectionOutput(YOLOv8ObjectDetectionOutput): |
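Both classes above inherit the YOLOv8 behaviour, where each line of a label file is `<category_id> <cx> <cy> <w> <h>` with the box centre and size normalised by image width and height. A hand-worked version of the conversion done in get_labels (plain Python, no dependencies):

# One label line for a 640x480 image:
line = "0 0.5 0.25 0.2 0.1"
category_id, rcx, rcy, rw, rh = line.split()
image_width, image_height = 640, 480

cx = float(rcx) * image_width    # 320.0 pixels (box centre x)
cy = float(rcy) * image_height   # 120.0 pixels (box centre y)
w = float(rw) * image_width      # 128.0 pixels (box width)
h = float(rh) * image_height     # 48.0 pixels (box height)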
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amitfin/oref_alert
# Path: custom_components/oref_alert/const.py
ADD_SENSOR_SERVICE: Final = "add_sensor"
# Path: custom_components/oref_alert/const.py
ATTR_COUNTRY_ALERTS: Final = "country_alerts"
# Path: custom_components/oref_alert/const.py
ATTR_COUNTRY_ACTIVE_ALERTS: Final = "country_active_alerts"
# Path: custom_components/oref_alert/const.py
ATTR_SELECTED_AREAS_ALERTS: Final = "selected_areas_alerts"
# Path: custom_components/oref_alert/const.py
ATTR_SELECTED_AREAS_ACTIVE_ALERTS: Final = "selected_areas_active_alerts"
# Path: custom_components/oref_alert/const.py
CONF_ALERT_MAX_AGE: Final = "alert_max_age"
# Path: custom_components/oref_alert/const.py
CONF_AREAS: Final = "areas"
# Path: custom_components/oref_alert/const.py
CONF_OFF_ICON: Final = "off_icon"
# Path: custom_components/oref_alert/const.py
CONF_ON_ICON: Final = "on_icon"
# Path: custom_components/oref_alert/const.py
CONF_POLL_INTERVAL: Final = "poll_interval"
# Path: custom_components/oref_alert/const.py
DOMAIN: Final = "oref_alert"
# Path: custom_components/oref_alert/const.py
OREF_ALERT_UNIQUE_ID: Final = "oref_alert"
# Path: custom_components/oref_alert/const.py
ALL_AREAS_ID_SUFFIX: Final = "all_areas"
# Path: tests/utils.py
def load_json_fixture(file_name: str) -> Any:
"""Return a json object from a local fixture file."""
with open(
fixture_path(file_name),
encoding="utf-8",
) as file:
return json.load(file)
# Path: tests/utils.py
def mock_urls(
aioclient_mock: AiohttpClientMocker,
real_time_fixture: str | None,
history_fixture: str | None,
**kwargs: Any,
) -> None:
"""Mock the URLs."""
aioclient_mock.clear_requests()
aioclient_mock.get(
OREF_ALERTS_URL,
text=load_fixture(real_time_fixture) if real_time_fixture else "",
**kwargs,
)
aioclient_mock.get(
OREF_HISTORY_URL,
text=load_fixture(history_fixture) if history_fixture else "",
**kwargs,
)
# Path: tests/test_binary_sensor.py
import datetime
import pytest
from typing import Any
from freezegun.api import FrozenDateTimeFactory
from homeassistant.const import CONF_NAME, Platform, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from pytest_homeassistant_custom_component.common import (
MockConfigEntry,
async_fire_time_changed,
)
from pytest_homeassistant_custom_component.test_util.aiohttp import AiohttpClientMocker
from custom_components.oref_alert.const import (
ADD_SENSOR_SERVICE,
ATTR_COUNTRY_ALERTS,
ATTR_COUNTRY_ACTIVE_ALERTS,
ATTR_SELECTED_AREAS_ALERTS,
ATTR_SELECTED_AREAS_ACTIVE_ALERTS,
CONF_ALERT_MAX_AGE,
CONF_AREAS,
CONF_OFF_ICON,
CONF_ON_ICON,
CONF_POLL_INTERVAL,
DOMAIN,
OREF_ALERT_UNIQUE_ID,
ALL_AREAS_ID_SUFFIX,
)
from .utils import load_json_fixture, mock_urls
"""The tests for the binary_sensor file."""
from __future__ import annotations
DEFAULT_OPTIONS = {CONF_AREAS: ["בארי"], CONF_ALERT_MAX_AGE: 10}
ENTITY_ID = f"{Platform.BINARY_SENSOR}.{OREF_ALERT_UNIQUE_ID}"
async def async_setup(
hass: HomeAssistant, options: dict[str, Any] | None = None
) -> str:
"""Integration setup."""
options = options or {}
config_entry = MockConfigEntry(
domain=DOMAIN, options={**DEFAULT_OPTIONS, **options}
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return config_entry.entry_id
async def async_shutdown(hass: HomeAssistant, config_id: str) -> None:
"""Shutdown by removing the integration."""
assert await hass.config_entries.async_remove(config_id)
await hass.async_block_till_done()
async def test_state(
hass: HomeAssistant,
aioclient_mock: AiohttpClientMocker,
freezer: FrozenDateTimeFactory,
) -> None:
"""Test entity state."""
freezer.move_to("2023-10-07 06:30:00+03:00")
mock_urls(aioclient_mock, None, "single_alert_history.json")
config_id = await async_setup(hass)
assert hass.states.get(ENTITY_ID).state == STATE_ON
freezer.move_to("2023-10-07 06:39:50+03:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_ID).state == STATE_ON
freezer.move_to("2023-10-07 06:40:01+03:00")
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_ID).state == STATE_OFF
await async_shutdown(hass, config_id)
@pytest.mark.parametrize(
("areas",),
((["תל אביב - כל האזורים"],), (["מחוז דן"],)),
ids=("City all areas", "District"),
)
async def test_real_time_alert_area_expansion(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, areas: list[str]
) -> None:
"""Test real time alert and city expansion."""
mock_urls(aioclient_mock, "single_alert_real_time.json", None)
config_id = await async_setup(hass, {CONF_AREAS: areas})
assert hass.states.get(ENTITY_ID).state == STATE_ON
await async_shutdown(hass, config_id)
async def test_state_attributes(
hass: HomeAssistant,
aioclient_mock: AiohttpClientMocker,
freezer: FrozenDateTimeFactory,
) -> None:
"""Test state attributes."""
freezer.move_to("2023-10-07 06:30:00+03:00")
mock_urls(
aioclient_mock, "multi_alerts_real_time.json", "multi_alerts_history.json"
)
config_id = await async_setup(hass)
state = hass.states.get(ENTITY_ID)
| active_area_alert = load_json_fixture("single_alert_history.json") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: apple/ml-nvas3d
# Path: soundspaces_nvas3d/utils/ss_utils.py
def render_rir_parallel(room_list: T.List[str],
source_position_list: T.List[T.Tuple[float, float, float]],
receiver_position_list: T.List[T.Tuple[float, float, float]],
filename_list: T.List[str] = None,
receiver_rotation_list: T.List[float] = None,
batch_size: int = 64,
sample_rate: float = 48000,
use_default_material: bool = False,
channel_type: str = 'Ambisonics',
channel_order: int = 1
) -> T.List[torch.Tensor]:
"""
    Run render_ir in parallel for all elements of zip(source_position_list, receiver_position_list).
"""
assert len(room_list) == len(source_position_list)
assert len(source_position_list) == len(receiver_position_list)
if filename_list is None:
is_return = True
else:
is_return = False
if receiver_rotation_list is None:
receiver_rotation_list = [0] * len(receiver_position_list)
# Note: Make sure all rooms are downloaded
# Calculate the number of batches
num_points = len(source_position_list)
num_batches = (num_points + batch_size - 1) // batch_size
# Use tqdm to display the progress bar
progress_bar = tqdm(total=num_points)
def update_progress(*_):
progress_bar.update()
ir_list = []
# Process the tasks in batches
for batch_idx in range(num_batches):
# Calculate the start and end indices of the current batch
start_idx = batch_idx * batch_size
end_idx = min(start_idx + batch_size, num_points)
if is_return:
batch = [(room_list[i], source_position_list[i], receiver_position_list[i], None, receiver_rotation_list[i]) for i in range(start_idx, end_idx)]
else:
batch = [(room_list[i], source_position_list[i], receiver_position_list[i], filename_list[i], receiver_rotation_list[i]) for i in range(start_idx, end_idx)]
# Create a multiprocessing Pool for the current batch
with multiprocessing.Pool() as pool:
tasks = []
for room, source_position, receiver_position, filename, receiver_rotation in batch:
# Apply async mapping of process_ir function
task = pool.apply_async(render_ir, args=(room, source_position, receiver_position, filename, receiver_rotation, sample_rate, use_default_material, channel_type, channel_order), callback=update_progress)
tasks.append(task)
# Wait for all tasks in the batch to complete and collect results
for task in tasks:
if is_return:
ir = task.get() # Block until the result is ready
ir_list.append(ir) # Append the result to the list
else:
task.get()
if is_return:
return ir_list
# Path: soundspaces_nvas3d/utils/aihabitat_utils.py
def load_room_grid(
room: str,
grid_distance: float
) -> T.Dict:
"""
Load grid data for a specified room. If the grid data does not exist, it generates one.
Args:
- room: Name of the room.
- grid_distance: The spacing between grid points.
Returns:
- A dictionary containing grid information for the specified room.
"""
grid_distance_str = str(grid_distance).replace(".", "_")
dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'
filename_grid = f'{dirname_grid}/grid_{room}.npy'
if not os.path.exists(filename_grid):
os.makedirs(dirname_grid, exist_ok=True)
print(f'Computing grid_{room}...')
from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points
grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)
# load grid
grid_info = np.load(filename_grid, allow_pickle=True).item()
return grid_info
# Path: soundspaces_nvas3d/rir_generation/generate_rir.py
import os
import argparse
import itertools
from soundspaces_nvas3d.utils.ss_utils import render_rir_parallel
from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def generate_rir(args: argparse.Namespace) -> None:
"""
Generate Room Impulse Response (RIR) based on given room and grid distance.
"""
grid_distance_str = str(args.grid_distance).replace(".", "_")
dirname = os.path.join(args.dirname, f'rir_mp3d/grid_{grid_distance_str}', args.room)
os.makedirs(dirname, exist_ok=True)
| grid_data = load_room_grid(args.room, grid_distance=args.grid_distance) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kwonathan/language-models-trajectory-generators
# Path: prompts/success_detection_prompt.py
SUCCESS_DETECTION_PROMPT = \
"""You are tasked with determining whether a user command was completed successfully or not, based on how the positions and orientations of the relevant objects in the environment changed during the execution of the task.
The 3D coordinate system of the environment is as follows:
1. The x-axis is in the horizontal direction, increasing to the right.
2. The y-axis is in the depth direction, increasing away from you.
3. The z-axis is in the vertical direction, increasing upwards.
The position values are in metres.
The objects can rotate about the z-axis, from -pi to pi radians.
Negative rotation values represent clockwise rotation, and positive rotation values represent anticlockwise rotation. The rotation values are in radians.
The user command is "[INSERT TASK]".
1. Given the user command, describe how the object positions and orientations should have changed during the execution of the task.
2. From the given positions and orientations of the relevant objects, output whether the task was completed successfully or not.
3. If the task was completed successfully, output
```python
task_completed()
```.
4. If the task was not completed successfully, output
```python
task_failed()
```.
Do not define the task_completed and task_failed functions yourself.
The positions and orientations of the relevant objects in the environment are as follows:
"""
# Path: config.py
OK = "\033[92m"
# Path: config.py
PROGRESS = "\033[93m"
# Path: config.py
FAIL = "\033[91m"
# Path: config.py
ENDC = "\033[0m"
# Path: config.py
CAPTURE_IMAGES = 1
# Path: config.py
ADD_BOUNDING_CUBES = 2
# Path: config.py
ADD_TRAJECTORY_POINTS = 3
# Path: config.py
EXECUTE_TRAJECTORY = 4
# Path: config.py
OPEN_GRIPPER = 5
# Path: config.py
CLOSE_GRIPPER = 6
# Path: config.py
TASK_COMPLETED = 7
# Path: config.py
RESET_ENVIRONMENT = 8
# Path: api.py
import numpy as np
import sys
import torch
import math
import config
import models
import utils
from PIL import Image
from prompts.success_detection_prompt import SUCCESS_DETECTION_PROMPT
from config import OK, PROGRESS, FAIL, ENDC
from config import CAPTURE_IMAGES, ADD_BOUNDING_CUBES, ADD_TRAJECTORY_POINTS, EXECUTE_TRAJECTORY, OPEN_GRIPPER, CLOSE_GRIPPER, TASK_COMPLETED, RESET_ENVIRONMENT
class API:
def __init__(self, args, main_connection, logger, langsam_model, xmem_model, device):
self.args = args
self.main_connection = main_connection
self.logger = logger
self.langsam_model = langsam_model
self.xmem_model = xmem_model
self.device = device
self.segmentation_texts = []
self.segmentation_count = 0
self.trajectory_length = 0
self.attempted_task = False
self.completed_task = False
self.failed_task = False
self.head_camera_position = None
self.head_camera_orientation_q = None
self.wrist_camera_position = None
self.wrist_camera_orientation_q = None
self.command = None
def detect_object(self, segmentation_text):
self.logger.info(PROGRESS + "Capturing head and wrist camera images..." + ENDC)
self.main_connection.send([CAPTURE_IMAGES])
[head_camera_position, head_camera_orientation_q, wrist_camera_position, wrist_camera_orientation_q, env_connection_message] = self.main_connection.recv()
self.logger.info(env_connection_message)
self.head_camera_position = head_camera_position
self.head_camera_orientation_q = head_camera_orientation_q
self.wrist_camera_position = wrist_camera_position
self.wrist_camera_orientation_q = wrist_camera_orientation_q
rgb_image_head = Image.open(config.rgb_image_head_path).convert("RGB")
depth_image_head = Image.open(config.depth_image_head_path).convert("L")
depth_array = np.array(depth_image_head) / 255.
if self.segmentation_count == 0:
xmem_image = Image.fromarray(np.zeros_like(depth_array)).convert("L")
xmem_image.save(config.xmem_input_path)
segmentation_texts = [segmentation_text]
self.logger.info(PROGRESS + "Segmenting head camera image..." + ENDC)
model_predictions, boxes, segmentation_texts = models.get_langsam_output(rgb_image_head, self.langsam_model, segmentation_texts, self.segmentation_count)
| self.logger.info(OK + "Finished segmenting head camera image!" + ENDC) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: VikParuchuri/classified
# Path: app/labeler/lens.py
class Lens:
def __init__(self, lens_type):
self.lens_type = lens_type
self.template_dir = os.path.join(settings.LENS_DIR, lens_type)
self.function = self.get_function()
self.system_prompt = self.get_system_template()
self.config = self.get_config()
self.input_fields = self.config["input_fields"]
def get_system_template(self):
return render_template("system", self.template_dir)
def prompt_template(self, *args):
if len(args) != len(self.input_fields):
raise ValueError(f"Missing one or more required fields {self.input_fields} for lens {self.lens_type}")
kwargs = dict(zip(self.input_fields, args))
return render_template("prompt", self.template_dir, **kwargs)
def get_function(self):
with open(f"{self.template_dir}/function.json") as f:
functions = json.load(f)
return functions
def get_config(self):
with open(f"{self.template_dir}/config.json") as f:
config = json.load(f)
return config
def labels(self):
return self.function["parameters"]["required"]
def score_labels(self):
return [l for l in self.labels() if self.function["parameters"]["properties"][l]["type"] in ["integer", "float", "number"]]
def rationale_labels(self):
return [l for l in self.labels() if self.function["parameters"]["properties"][l]["type"] == "string"]
def rater_type(self):
return self.config["type"]
# Path: app/labeler/raters/common.py
def get_final_score(scores):
final_score = 0
if all([s >= 2.5 for s in scores]) and scores[-1] >= 2.75:
final_score = 3
elif all([s >= 1.5 for s in scores]) and scores[-1] >= 2:
final_score = 2
elif all([s >= 0.5 for s in scores]) and scores[-1] >= 1:
final_score = 1
return final_score
# Path: app/llm/llm.py
def chat_completion(lens_type, messages, functions: None | List[Dict] = None, model=settings.CHAT_MODEL, max_tokens=settings.MAX_GENERATION_TOKENS, temperature=.2, version=1, cache=True):
if cache:
response = query_cached_response(lens_type, messages, functions, model, version)
if response:
return response.response
response = _chat_completion(messages, functions, model, max_tokens, temperature)
if cache and response:
save_cached_response(lens_type, messages, functions, response, model, version)
return response
# Path: app/labeler/raters/instruct.py
import json
from typing import List
from app.labeler.lens import Lens
from app.labeler.raters.common import get_final_score
from app.llm.llm import chat_completion
def rate_data(resource: List[str], lens_type: str, version: int = 1):
lens = Lens(lens_type)
instruction, output = resource
user_prompt = lens.prompt_template(instruction, output)
messages = [
{"role": "system", "content": lens.system_prompt},
{"role": "user", "content": user_prompt},
]
| chat_response = chat_completion(lens_type, messages, [lens.function], version=version) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tiejundong/FlexPose
# Path: FlexPose/utils/common.py
def print_args(args):
print('=' * 30 + ' Current settings ' + '=' * 30)
for k, v in args.__dict__.items():
print(k.ljust(40, '.'), v)
print('=' * (60 + len(' Current settings ')))
# Path: FlexPose/utils/common.py
def delmkdir(path, remove_old=True):
isexist = os.path.exists(path)
if not isexist:
os.makedirs(path)
if isexist == True and remove_old:
shutil.rmtree(path)
os.makedirs(path)
# Path: FlexPose/preprocess/prepare_for_training.py
def try_prepare_APOPDBbind(*args, **kwargs):
try:
save_APOPDBbind(*args, **kwargs)
    except Exception:
pass
# Path: FlexPose/preprocess/prepare_for_training.py
def save_APOPDBbind(tup_in):
dic_data = prepare(tup_in)
save_path, pdb_id, df_apo_sub, apo_path, pdbbind_path, aff_path, MCaug_path, df_apo_sub, have_apo, max_len_pocket, max_len_ligand, tmp_path = tup_in
# np.savez_compressed(npz_save_path + '/{}.npz'.format(pdb_id), **dic_data)
pickle.dump(dic_data, open(save_path + '/{}.pkl'.format(pdb_id), 'wb'))
# Path: FlexPose/preprocess/prepare_APOPDBbind.py
import os
import shutil
import sys
import argparse
import pandas as pd
from ray.util.multiprocessing import Pool
from tqdm import tqdm
from FlexPose.utils.common import print_args, delmkdir
from FlexPose.preprocess.prepare_for_training import try_prepare_APOPDBbind, save_APOPDBbind
sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2]))
if __name__ == '__main__':
# main args
parser = argparse.ArgumentParser()
# data source
parser.add_argument('--apobind_path', type=str,
default='/home/dtj/work_site/test/tmp/data/apobind', help='APObind dataset path')
parser.add_argument('--pdbbind_path', type=str,
default='/home/dtj/work_site/test/tmp/data/v2020-PL', help='PDBbind dataset path')
parser.add_argument('--apo_info_path', type=str,
default='/home/dtj/work_site/test/tmp/data/apobind_all.csv', help='APObind apo-holo mapping csv path (provided by APObind)')
parser.add_argument('--aff_info_path', type=str,
default='/home/dtj/work_site/test/tmp/data/index/INDEX_general_PL_data.2020',
help='PDBbind affinity data path')
parser.add_argument('--aug_path', type=str,
default='/home/dtj/work_site/test/tmp/data/pdbbind_MC', help='Rosetta decoys (pseudo apo structures)')
# parameters
parser.add_argument('--max_len_pocket', type=int,
default=50, help='max number of protein pocket residues')
parser.add_argument('--max_len_ligand', type=int,
default=50, help='max number of ligand atoms')
# other
parser.add_argument('--tmp_path', type=str,
default='./tmp', help='tmp file for temporary saving')
# output
parser.add_argument('--save_path', type=str,
default='/home/dtj/work_site/test/tmp/data/processed_data_maxp50_maxl50', help='output path (preprocessed), npz or pkl')
args = parser.parse_args()
| print_args(args) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: openvpi/SingingVocoders
# Path: modules/ddsp/loss.py
class RSSLoss(nn.Module):
'''
Random-scale Spectral Loss.
'''
def __init__(self, fft_min, fft_max, n_scale, alpha=1.0, overlap=0, eps=1e-7, device='cuda'):
super().__init__()
self.fft_min = fft_min
self.fft_max = fft_max
self.n_scale = n_scale
self.lossdict = {}
for n_fft in range(fft_min, fft_max):
self.lossdict[n_fft] = SSSLoss(n_fft, alpha, overlap, eps).to(device)
def forward(self, x_pred, x_true):
value = 0.
n_ffts = torch.randint(self.fft_min, self.fft_max, (self.n_scale,))
for n_fft in n_ffts:
loss_func = self.lossdict[int(n_fft)]
value += loss_func(x_true, x_pred)
return value / self.n_scale
# Path: modules/loss/stft_loss.py
class warp_stft:
def __init__(self,cfg={},divce='cuda'):
self.stft=MultiResolutionSTFTLoss(**cfg).to(divce)
def loss(self,x, y):
return self.stft(x, y)
# Path: utils/wav2mel.py
class PitchAdjustableMelSpectrogram:
def __init__(
self,
sample_rate=44100,
n_fft=2048,
win_length=2048,
hop_length=512,
f_min=40,
f_max=16000,
n_mels=128,
center=False,
):
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_size = win_length
self.hop_length = hop_length
self.f_min = f_min
self.f_max = f_max
self.n_mels = n_mels
self.center = center
self.mel_basis = {}
self.hann_window = {}
def __call__(self, y, key_shift=0, speed=1.0):
factor = 2 ** (key_shift / 12)
n_fft_new = int(np.round(self.n_fft * factor))
win_size_new = int(np.round(self.win_size * factor))
hop_length = int(np.round(self.hop_length * speed))
# if torch.min(y) < -1.0:
# logger.warning(f"min value is {torch.min(y)}")
# if torch.max(y) > 1.0:
# logger.warning(f"max value is {torch.max(y)}")
mel_basis_key = f"{self.f_max}_{y.device}"
if mel_basis_key not in self.mel_basis:
mel = librosa_mel_fn(
sr=self.sample_rate,
n_fft=self.n_fft,
n_mels=self.n_mels,
fmin=self.f_min,
fmax=self.f_max,
)
self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
hann_window_key = f"{key_shift}_{y.device}"
if hann_window_key not in self.hann_window:
self.hann_window[hann_window_key] = torch.hann_window(
win_size_new, device=y.device
)
y = torch.nn.functional.pad(
y.unsqueeze(1),
(
int((win_size_new - hop_length) // 2),
int((win_size_new - hop_length+1) // 2),
),
mode="reflect",
)
y = y.squeeze(1)
spec = torch.stft(
y,
n_fft_new,
hop_length=hop_length,
win_length=win_size_new,
window=self.hann_window[hann_window_key],
center=self.center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
).abs()
# spec = torch.view_as_real(spec)
# spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
if key_shift != 0:
size = self.n_fft // 2 + 1
resize = spec.size(1)
if resize < size:
spec = F.pad(spec, (0, 0, 0, size - resize))
spec = spec[:, :size, :] * self.win_size / win_size_new
spec = torch.matmul(self.mel_basis[mel_basis_key], spec)
return spec
def dynamic_range_compression_torch(self,x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
# Path: modules/loss/vaeHiFiloss.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.ddsp.loss import RSSLoss
from modules.loss.stft_loss import warp_stft
from utils.wav2mel import PitchAdjustableMelSpectrogram
def kl_loss(logs, m):
kl = 0.5 * (m**2 + torch.exp(logs) - logs - 1).sum(dim=1)
kl = torch.mean(kl)
return kl
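# Sanity check for kl_loss (sketch): with m = 0 and logs = 0 every element contributes
# 0.5 * (0 + exp(0) - 0 - 1) = 0, so the divergence against a standard normal vanishes:
# >>> kl_loss(torch.zeros(2, 8), torch.zeros(2, 8))
# tensor(0.)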
class HiFiloss(nn.Module):
def __init__(self,config:dict):
super().__init__()
| self.mel=PitchAdjustableMelSpectrogram( sample_rate=config['audio_sample_rate'], |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RobertCsordas/moe
# Path: tasks/simple/language_model/enwik8_transformer.py
class Enwik8Transformer(TransformerLMMixin, SimpleTask):
VALID_NUM_WORKERS = 1
TRAIN_NUM_WORKERS = 2
def create_state(self):
self.helper.state.epoch = 0
def create_model_interface(self):
self.model_interface = LanguageModelInterface(
self.model, drop_state_prob=self.helper.args.lm.state_drop_probability, dist_env=self.helper.dist_env)
self.helper.saver["interface"] = self.model_interface
def validate_on(self, set: torch.utils.data.Dataset, loader: torch.utils.data.DataLoader) -> Tuple[Any, float]:
state = self.model_interface.state
self.model_interface.reset_state()
res = super().validate_on(set, loader)
self.model_interface.state = state
return res
def log_epoch(self):
self.helper.log({"epoch": self.helper.state.epoch})
def start_next_epoch(self):
self.model_interface.reset_state()
self.helper.state.epoch += 1
self.log_epoch()
def get_train_batch(self) -> Dict[str, Any]:
try:
return next(self.data_iter)
except StopIteration:
self.start_next_epoch()
self.data_iter = iter(self.train_loader)
return next(self.data_iter)
def create_sampler(self, loader: torch.utils.data.Dataset, batch_size: int) -> \
framework.loader.sampler.MultibatchSequentialSampler:
return framework.loader.sampler.MultibatchSequentialSampler(loader, batch_size,
world_size=self.helper.dist_env.world_size, rank=self.helper.dist_env.rank)
def create_valid_loader(self, vset: torch.utils.data.Dataset) -> torch.utils.data.DataLoader:
return torch.utils.data.DataLoader(vset,
batch_sampler=self.create_sampler(vset, self.test_batch_size),
collate_fn=framework.loader.collate.VarLengthCollate(batch_dim=self.batch_dim),
num_workers=self.VALID_NUM_WORKERS)
def create_train_loader(self, loader: torch.utils.data.Dataset) -> torch.utils.data.DataLoader:
sampler = self.create_sampler(loader, self.helper.args.batch_size)
self.helper.saver.register("sampler", sampler, replace=True)
return torch.utils.data.DataLoader(loader, batch_sampler=sampler, num_workers=self.TRAIN_NUM_WORKERS,
pin_memory=True, collate_fn=framework.loader.collate.VarLengthCollate(
batch_dim=self.batch_dim))
def create_datasets(self):
self.batch_dim = 1
self.train_set = dataset.Enwik8("train", self.helper.args.lm.unroll)
self.valid_sets.val = dataset.Enwik8("valid", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll)
self.valid_sets.test = dataset.Enwik8("test", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll)
def train(self):
self.log_epoch()
super().train()
# Path: tasks/task_db.py
def task(name: Optional[str] = None):
def wrapper(cls):
n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))
assert n not in TASKS, f"Task {n} already exists"
TASKS[n] = cls
return cls
return wrapper
# Path: tasks/task_db.py
def args(fn):
global ARGS_REGISTERS
ARGS_REGISTERS.append(fn)
return fn
# Path: tasks/simple/language_model/wikitext103_sp_transformer.py
import torch
import dataset
import framework
from .enwik8_transformer import Enwik8Transformer
from ... import task, args
@args
def a(parser: framework.helpers.ArgumentParser):
parser.add_argument("-sentencepiece.n_pieces", default=8000)
| @task() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yk/llmvm
# Path: interface.py
class Arg(pydantic.BaseModel):
vtype: str
value: str
# Path: interface.py
class Load(Expr):
kind: str = "load"
vtype: str
ptr: str
# Path: interface.py
class Icmp(Expr):
kind: str = "icmp"
vtype: str
op: str
lhs: str
rhs: str
# Path: interface.py
class Srem(Expr):
kind: str = "srem"
vtype: str
lhs: str
rhs: str
# Path: interface.py
class Add(Expr):
kind: str = "add"
vtype: str
lhs: str
rhs: str
# Path: interface.py
class Mul(Expr):
kind: str = "mul"
vtype: str
lhs: str
rhs: str
# Path: interface.py
class Call(Expr):
kind: str = "call"
name: str
args: list[Arg]
# Path: interface.py
class Assign(Instruction):
kind: str = "assign"
reg: str
expr: Expr
# Path: interface.py
class Store(Instruction):
kind: str = "store"
vtype: str
value: str
ptr: str
# Path: interface.py
class Branch(Instruction):
kind: str = "branch"
label: str
# Path: interface.py
class BranchCond(Instruction):
kind: str = "branch_cond"
cond_reg: str
label_true: str
label_false: str
# Path: interface.py
class Return(Instruction):
kind: str = "return"
vtype: str
value: str
# Path: interface.py
class Program(pydantic.BaseModel):
instructions: list[Instruction]
labels: dict[str, int]
constants: dict[str, Any]
convert_numbers_to_chars: bool = False
# Path: interface.py
def to_vtype(value, vtype):
match vtype:
case "i32":
return int(value)
case "i8":
return str(value)
case "str":
return str(value)
raise NotImplementedError(vtype)
# Path: interface.py
class GetElementPtr(Expr):
kind: str = "get_element_ptr"
vtype: str
ptr: str
idx: str
# Path: interface.py
class Copy(Expr):
kind: str = "copy"
ptr: str
# Path: interface.py
class Switch(Instruction):
kind: str = "switch"
ptr: str
default_label: str
cases: dict[str, str]
# Path: interface.py
class AllocArray(Expr):
kind: str = "alloc_array"
vtype: str
size: int
# Path: interface.py
class Alloc(Expr):
kind: str = "alloc"
vtype: str
# Path: parsing.py
import re
from loguru import logger
from interface import Arg, Load, Icmp, Srem, Add, Mul, Call, Assign, Store, Branch, BranchCond, Return, Program, to_vtype, GetElementPtr, Copy, Switch, AllocArray, Alloc
def _line_stripper(in_f):
for line in in_f:
line = line.rstrip()
if not line:
continue
yield line
def parse_arg(arg):
logger.debug(f"parse_arg({arg})")
if m := re.match(r"ptr noundef (\S+)", arg):
return Arg(vtype="str", value=m.group(1))
if m := re.match(r"i32 noundef (\S+)", arg):
return Arg(vtype="i32", value=m.group(1))
raise NotImplementedError(arg)
def parse_call(expr):
logger.debug(f"parse_call({expr})")
if m := re.match(r"\s*call \w+(?: \(.*\))? @(\w+)\((.*)\)", expr):
name, args = m.groups()
args = args.split(", ")
args = [parse_arg(arg) for arg in args if arg]
| return Call(name=name, args=args) |
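A quick trace of parse_call on a concrete instruction, assuming parsing.py is importable as a module; the values follow directly from the regular expressions above:

from parsing import parse_call

call = parse_call("call i32 @add(i32 noundef 1, i32 noundef 2)")
print(call.name)                                 # add
print([(a.vtype, a.value) for a in call.args])   # [('i32', '1'), ('i32', '2')]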
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: w-e-w/sd-webui-nudenet-nsfw-censor
# Path: scripts/nudenet_nsfw_censor_scripts/pil_nude_detector.py
def draw_ellipse(draw, left_expanded, top_expanded, right_expanded, down_expanded, *args, **kwargs):
def draw_rectangle(draw, left_expanded, top_expanded, right_expanded, down_expanded, *args, **kwargs):
def rounded_rectangle(draw, left_expanded, top_expanded, right_expanded, down_expanded, width_expanded, height_expanded, rectangle_round_radius, *args, **kwargs):
def __init__(self):
def init_onnx(self):
def change_onnx_provider(self):
def refresh_label_configs(self):
def pre_process_pil(self, pil_image):
def calculate_censor_mask(self, detection_results, img_size, thresholds, expand_horizontal, expand_vertical, nms_threshold, nudenet_nsfw_censor_mask_shape, rectangle_round_radius):
def get_censor_mask(self, pil_image, nms_threshold, nudenet_nsfw_censor_mask_shape, rectangle_round_radius, thresholds, expand_horizontal, expand_vertical):
class PilNudeDetector:
# Path: scripts/nudenet_nsfw_censor_scripts/censor_image_filters.py
def combine_results(input_image, input_mask, processed):
def variable_blur(input_image: Image, control_mask: Image, blur_radius: float = 10, blur_strength_curve: float = 3, *args, **kwargs):
def mask_array_to_img(i):
def img_gaussian_blur(i):
def combine_mask(index_1, index_2, pre_step_size):
def combine(index_1, index_2, pre_step_size):
def gaussian_blur(input_image, input_mask, blur_radius, *args, **kwargs):
def pixelate(input_image, input_mask, pixelation_factor, *args, **kwargs):
def fill_color(input_image, input_mask, color, *args, **kwargs):
def do_nothing(input_image, *args, **kwargs):
def apply_filter(input_image, input_mask, filter_type, *args, **kwargs):
# Path: scripts/nudenet_nsfw_censor_scripts/api.py
from scripts.nudenet_nsfw_censor_scripts.pil_nude_detector import pil_nude_detector, nudenet_labels_index, mask_shapes_func_dict
from scripts.nudenet_nsfw_censor_scripts.censor_image_filters import apply_filter, filter_dict
from modules.api.api import decode_base64_to_image, encode_pil_to_base64
from fastapi import FastAPI, Body
from PIL import ImageFilter
from modules import shared
from math import sqrt
import gradio as gr
import numpy as np
def nudenet_censor_api(_: gr.Blocks, app: FastAPI):
@app.post("/nudenet/censor")
async def censor(
input_image: str = Body(None, title="base64 input image"),
input_mask: str = Body(None, title="base64 mask (optional)"),
enable_nudenet: bool = Body(True, title="Enable NudeNet mask detection"),
output_mask: bool = Body(None, title="return mask"),
| filter_type: str = Body(None, title=f"Name of censor filter: {list(filter_dict)}"), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: enkeejunior1/Diffusion-Pullback
# Path: src/models/improved_diffusion/fp16_util.py
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.half()
l.bias.data = l.bias.data.half()
# Path: src/models/improved_diffusion/fp16_util.py
def convert_module_to_f32(l):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.float()
l.bias.data = l.bias.data.float()
# Path: src/models/improved_diffusion/nn.py
class SiLU(nn.Module):
def forward(self, x):
return x * th.sigmoid(x)
# Path: src/models/improved_diffusion/nn.py
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
# Path: src/models/improved_diffusion/nn.py
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
# Path: src/models/improved_diffusion/nn.py
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
# Path: src/models/improved_diffusion/nn.py
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
# Path: src/models/improved_diffusion/nn.py
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
# Path: src/models/improved_diffusion/nn.py
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
# Path: src/models/improved_diffusion/nn.py
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
# Path: src/models/improved_diffusion/unet.py
from abc import abstractmethod
from einops import rearrange, reduce, repeat, einsum
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
SiLU,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
checkpoint,
)
import math
import time
import torchvision.utils as tvu
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2):
super().__init__()
self.channels = channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
| self.conv = conv_nd(dims, channels, channels, 3, padding=1) |
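timestep_embedding, defined earlier in this excerpt, builds the usual sinusoidal embedding of diffusion timesteps. A small shape and value check, assuming the package layout from the path headers and an installed PyTorch:

import torch as th
from src.models.improved_diffusion.nn import timestep_embedding

emb = timestep_embedding(th.tensor([0.0, 10.0]), dim=8)
print(emb.shape)   # torch.Size([2, 8])
print(emb[0])      # tensor([1., 1., 1., 1., 0., 0., 0., 0.])  -- cos(0) = 1, sin(0) = 0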
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: NVIDIA-Omniverse/IsaacSim-Automator
# Path: src/python/utils.py
def colorize_error(text):
return click.style(text, fg="bright_red", italic=True)
# Path: src/python/utils.py
def colorize_info(text):
return click.style(text, fg="bright_magenta", italic=True)
# Path: src/python/utils.py
def colorize_prompt(text):
return click.style(text, fg="bright_cyan", italic=True)
# Path: src/python/utils.py
def colorize_result(text):
return click.style(text, fg="bright_green", italic=True)
# Path: src/python/utils.py
def read_meta(deployment_name: str, verbose: bool = False):
"""
Read metadata from json file
"""
meta_file = f"{config['state_dir']}/{deployment_name}/meta.json"
if os.path.isfile(meta_file):
data = json.loads(Path(meta_file).read_text())
if verbose:
click.echo(colorize_info(f"* Meta info loaded from '{meta_file}'"))
return data
raise Exception(f"Meta file '{meta_file}' not found")
# Path: src/python/utils.py
def shell_command(
command, verbose=False, cwd=None, exit_on_error=True, capture_output=False
):
"""
Execute shell command, print it if debug is enabled
"""
if verbose:
if cwd is not None:
click.echo(colorize_info(f"* Running `(cd {cwd} && {command})`..."))
else:
click.echo(colorize_info(f"* Running `{command}`..."))
res = subprocess.run(
command,
shell=True,
cwd=cwd,
capture_output=capture_output,
)
if res.returncode == 0:
if verbose and res.stdout is not None:
click.echo(res.stdout.decode())
elif exit_on_error:
if res.stderr is not None:
click.echo(
colorize_error(f"Error: {res.stderr.decode()}"),
err=True,
)
exit(1)
return res
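# Usage sketch for shell_command (assuming the repo's environment, where deployer.py
# imports it via `from src.python.utils import shell_command`):
#
#     res = shell_command("echo hello", verbose=True, capture_output=True)
#     res.returncode          # 0
#     res.stdout.decode()     # 'hello\n'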
# Path: src/python/debug.py
def debug_break():
debug_start()
debugpy.breakpoint()
# Path: src/python/ngc.py
def check_ngc_access(ngc_api_key, org="", team="", verbose=False):
"""
Checks if NGC API key is valid and user has access to DRIVE Sim.
Returns:
- 0 - all is fine
- 100 - invalid api key
- 102 - user is not in the team
"""
proc = subprocess.run(
[f"{SELF_DIR}/ngc_check.expect", ngc_api_key, org, team],
capture_output=not verbose,
timeout=60,
)
if proc.returncode not in [0, 100, 101, 102]:
raise RuntimeError(
f"Error checking NGC API Key. Return code: {proc.returncode}"
)
return proc.returncode
# Path: src/python/deployer.py
import json
import os
import re
import shlex
import sys
import click
from pathlib import Path
from src.python.utils import (
colorize_error,
colorize_info,
colorize_prompt,
colorize_result,
read_meta,
shell_command,
)
from src.python.debug import debug_break # noqa
from src.python.ngc import check_ngc_access
# region copyright
# Copyright 2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# endregion
class Deployer:
def __init__(self, params, config):
self.tf_outputs = {}
self.params = params
self.config = config
self.existing_behavior = None
# save original params so we can recreate command line
self.input_params = params.copy()
# convert "in_china"
self.params["in_china"] = {"yes": True, "no": False, "auto": False}[
self.params["in_china"]
]
# create state directory if it doesn't exist
os.makedirs(self.config["state_dir"], exist_ok=True)
# print complete command line
if self.params["debug"]:
click.echo(colorize_info("* Command:\n" + self.recreate_command_line()))
def __del__(self):
# update meta info
self.save_meta()
def save_meta(self):
"""
Save command parameters in json file, just in case
"""
meta_file = (
f"{self.config['state_dir']}/{self.params['deployment_name']}/meta.json"
)
data = {
"command": self.recreate_command_line(separator=" "),
"input_params": self.input_params,
"params": self.params,
"config": self.config,
}
Path(meta_file).parent.mkdir(parents=True, exist_ok=True)
Path(meta_file).write_text(json.dumps(data, indent=4))
if self.params["debug"]:
click.echo(colorize_info(f"* Meta info saved to '{meta_file}'"))
| def read_meta(self): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: blackgold3/SemanticBoost
# Path: mdm/model/clip/model.py
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
# Path: mdm/model/clip/simple_tokenizer.py
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
# Path: mdm/model/clip/clip.py
import hashlib
import os
import urllib
import warnings
import torch
from typing import Any, Union, List
from pkg_resources import packaging
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from torchvision.transforms import InterpolationMode
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
| _tokenizer = _Tokenizer() |
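The bpe() method of SimpleTokenizer quoted above greedily merges the adjacent symbol pair with the lowest merge rank until no ranked pair remains. A simplified stand-alone illustration of that loop (hand-made merge ranks rather than the real BPE vocabulary file, and without the caching or error handling of the original):

def get_pairs(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("low", "</w>"): 2}  # assumed toy ranks

word = ("l", "o", "w", "</w>")
pairs = get_pairs(word)
while pairs:
    bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
    if bigram not in bpe_ranks:
        break
    first, second = bigram
    new_word, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            new_word.append(first + second)  # merge the pair into one symbol
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    word = tuple(new_word)
    pairs = get_pairs(word)
print(word)  # ('low</w>',)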
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: justchenhao/SILI_CD
# Path: datasets/transforms.py
def get_transforms(norm=False, img_size=256):
basic_transform = []
basic_transform.append(T.ToTensor()) # ndarray转为 torch.FloatTensor, 范围[0,1]
if norm:
basic_transform.append(T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
basic_transform.append(T.Resize(size=(img_size, img_size), interpolation=InterpolationMode.BILINEAR))
return T.Compose(basic_transform)
# Path: datasets/transforms.py
def get_mask_transforms(img_size=256):
basic_target_transform = T.Compose(
[
MaskToTensor(),
T.Resize(size=(img_size, img_size), interpolation=InterpolationMode.NEAREST),
]
)
return basic_target_transform
# Path: datasets/transforms.py
def get_seg_augs(imgz_size=256, data_keys=("input", "mask")):
default_seg_augs = K.AugmentationSequential(
K.RandomHorizontalFlip(p=0.5),
K.RandomVerticalFlip(p=0.5),
K.RandomResizedCrop(
size=(imgz_size, imgz_size), scale=(0.8, 1.0), resample="bilinear", align_corners=False
),
K.RandomGaussianBlur(kernel_size=(3, 3), sigma=(0.1, 2.0), p=0.5),
K.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
data_keys=data_keys
)
return default_seg_augs
# Path: misc/torchutils.py
def visualize_tensors(*tensors):
"""
Visualize tensors; supports single-channel feature maps or 3-channel images.
:param tensors: tensor: C*H*W, C=1/3
:return:
"""
import matplotlib.pyplot as plt
# from misc.torchutils import tensor2np
images = []
for tensor in tensors:
assert tensor.ndim == 3 or tensor.ndim==2
if tensor.ndim ==3:
assert tensor.shape[0] == 1 or tensor.shape[0] == 3
images.append(tensor2np(tensor))
nums = len(images)
if nums>1:
fig, axs = plt.subplots(1, nums)
for i, image in enumerate(images):
axs[i].imshow(image, cmap='jet')
plt.show()
elif nums == 1:
fig, ax = plt.subplots(1, nums)
for i, image in enumerate(images):
ax.imshow(image, cmap='jet')
plt.show()
# Path: datasets/base_dataset.py
import os
import numpy as np
import torch
from typing import Dict, Sequence, Tuple, Optional, Union
from PIL import Image
from torch.utils import data
from datasets.transforms import get_transforms, get_mask_transforms
from datasets.transforms import get_seg_augs
from misc.imutils import pil_rescale, pil_resize
from misc.torchutils import visualize_tensors
"""
some basic data loader
for example:
Image loader, Segmentation loader,
data root
├─A
├─label
└─list
"""
def load_img_name_list(dataset_path):
img_name_list = np.loadtxt(dataset_path, dtype=str)
if img_name_list.ndim == 2:
return img_name_list[:, 0]
return img_name_list
class ImageDataset(data.Dataset):
"""list dataloder"""
def __init__(self, root_dir: str,
split: str = 'train',
img_size: int = 256,
norm: bool = False,
img_folder_name: Union[str, list, tuple] = 'A',
list_folder_name: str = 'list',
scale_ratios: Union[int, list] = 1):
super(ImageDataset, self).__init__()
self.root_dir = root_dir
self.split = split # train | train_aug | val
self.list_path = os.path.join(self.root_dir, list_folder_name, self.split+'.txt')
self.img_name_list = load_img_name_list(self.list_path)
if isinstance(img_folder_name, list) or isinstance(img_folder_name, tuple):
# Handle the case where multiple image folders contain files with the same names, e.g. img_folder_name=['A', 'B']
self.img_folder_with_name_list = [img_folder_name_+'/'+name
for name in self.img_name_list
for img_folder_name_ in img_folder_name]
elif isinstance(img_folder_name, str):
self.img_folder_with_name_list = [img_folder_name+'/'+name
for name in self.img_name_list]
else:
raise NotImplementedError
self.A_size = len(self.img_folder_with_name_list) # get the size of dataset A
self.img_folder_name = img_folder_name
self.img_size = img_size
self.norm = norm
self.basic_transforms = get_transforms(norm=norm, img_size=img_size)
self.scale_ratios = scale_ratios
def __getitem__(self, index):
folder_with_name = self.img_folder_with_name_list[index % self.A_size]
img_folder_name = folder_with_name.split('/')[0]
name = folder_with_name.split('/')[-1]
A_path = os.path.join(self.root_dir, img_folder_name, name)
img = np.asarray(Image.open(A_path).convert('RGB'))
scales = self.scale_ratios
if isinstance(scales, list):
scale = scales[torch.randint(len(scales), (1,)).item()]
else:
scale = scales
if scale != 1:
h, w = img.shape[:2]
img = pil_rescale(img, scale=scale, order=3)
img = pil_resize(img, size=[h, w], order=3)
if self.basic_transforms is not None:
img = self.basic_transforms(img)
return {'A': img, 'name': name}
def __len__(self):
"""Return the total number of images in the dataset."""
return self.A_size
class SegDataset(ImageDataset):
'''
transforms: transformations applied to both the image and the mask simultaneously;
'''
def __init__(self,
root_dir: str,
split: str = 'train',
img_size: int = 256,
norm: bool = False,
img_folder_name: Union[str, list, tuple] = 'A',
label_transform: str = 'norm',
label_folder_name: str = 'label',
scale_ratios: Union[int, list] = 1):
super(SegDataset, self).__init__(root_dir, split=split,
img_size=img_size,
norm=norm,
img_folder_name=img_folder_name,
scale_ratios=scale_ratios)
| self.basic_mask_transforms = get_mask_transforms(img_size=img_size) |
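ImageDataset.__getitem__ above optionally rescales the image by a random ratio and then resizes it back to its original size, which amounts to a resolution-degradation augmentation. A rough PIL-only sketch of the same idea (pil_rescale / pil_resize from misc.imutils are not shown in this excerpt, so plain Image.resize is used here as an approximation):

import numpy as np
from PIL import Image

img = Image.fromarray(np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8))
w, h = img.size
scale = 0.5
small = img.resize((int(w * scale), int(h * scale)), Image.BICUBIC)  # downscale
degraded = small.resize((w, h), Image.BICUBIC)                       # back to the original size
print(degraded.size)  # (256, 256)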
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pythonlessons/FinRock
# Path: finrock/render.py
class RenderOptions:
def __init__(
self,
name: str,
color: tuple,
window_type: WindowType,
render_type: RenderType,
min: float,
max: float,
value: float = None,
):
self.name = name
self.color = color
self.window_type = window_type
self.render_type = render_type
self.min = min
self.max = max
self.value = value
def copy(self):
return RenderOptions(
name=self.name,
color=self.color,
window_type=self.window_type,
render_type=self.render_type,
min=self.min,
max=self.max,
value=self.value
)
# Path: finrock/render.py
class RenderType(Enum):
LINE = 0
DOT = 1
# Path: finrock/render.py
class WindowType(Enum):
MAIN = 0
SEPERATE = 1
# Path: finrock/indicators.py
import pandas as pd
from .render import RenderOptions, RenderType, WindowType
class Indicator:
""" Base class for indicators
"""
def __init__(
self,
data: pd.DataFrame,
target_column: str='close',
render_options: dict={}
) -> None:
self._data = data.copy()
self._target_column = target_column
self._render_options = render_options
self.values = {}
assert isinstance(self._data, pd.DataFrame) == True, "data must be a pandas.DataFrame"
assert self._target_column in self._data.columns, f"data must have '{self._target_column}' column"
self.compute()
if not self._render_options:
self._render_options = self.default_render_options()
@property
def min(self):
return self._data[self.target_column].min()
@property
def max(self):
return self._data[self.target_column].max()
@property
def target_column(self):
return self._target_column
@property
def name(self):
return self.__class__.__name__
@property
def names(self):
return self._names
def compute(self):
raise NotImplementedError
def default_render_options(self):
return {}
def render_options(self):
return {name: option.copy() for name, option in self._render_options.items()}
def __getitem__(self, index: int):
row = self._data.iloc[index]
for name in self.names:
if pd.isna(row[name]):
return None
self.values[name] = row[name]
if self._render_options.get(name):
self._render_options[name].value = row[name]
return self.serialise()
def __call__(self, index: int):
return self[index]
def serialise(self):
return {
'name': self.name,
'names': self.names,
'values': self.values.copy(),
'target_column': self.target_column,
'render_options': self.render_options(),
'min': self.min,
'max': self.max
}
class SMA(Indicator):
""" Trend indicator
A simple moving average (SMA) calculates the average of a selected range of prices, usually closing prices, by the number
of periods in that range.
The SMA is a technical indicator for determining if an asset price will continue or reverse a bull or bear trend. It is
calculated by summing up the closing prices of a stock over time and then dividing that total by the number of time periods
being examined. Short-term averages respond quickly to changes in the price of the underlying, while long-term averages are
slow to react.
https://www.investopedia.com/terms/s/sma.asp
"""
def __init__(
self,
data: pd.DataFrame,
period: int=20,
target_column: str='close',
render_options: dict={}
):
self._period = period
self._names = [f'SMA{period}']
super().__init__(data, target_column, render_options)
@property
def min(self):
return self._data[self.names[0]].min()
@property
def max(self):
return self._data[self.names[0]].max()
def default_render_options(self):
return {name: RenderOptions(
name=name,
color=(100, 100, 255),
| window_type=WindowType.MAIN, |
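The SMA docstring above describes the indicator as the average of the last "period" closing prices. The compute() body of the subclass is not shown in this excerpt, but the textbook calculation is a rolling mean, e.g. with pandas (illustrative prices):

import pandas as pd

close = pd.Series([10.0, 11.0, 12.0, 13.0, 14.0])
sma3 = close.rolling(window=3).mean()
print(sma3.tolist())  # [nan, nan, 11.0, 12.0, 13.0]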
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hitlic/deepepochs
# Path: deepepochs/loops.py
def sum_dicts(dicts, to_np=False):
dicts = concat_dicts(dicts, to_np)
return ddict({k: sum(v) for k, v in dicts.items()})
# Path: deepepochs/loops.py
class ddict(dict):
"""
A dictionary whose items can also be accessed with "." attribute syntax.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
if isinstance(v, dict):
self[k] = ddict(v)
else:
self[k] = v
if kwargs:
for k, v in kwargs.items():
if isinstance(v, dict):
self[k] = ddict(v)
else:
self[k] = v
def __getattr__(self, key):
value = self[key]
return value
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super().__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super().__delitem__(key)
del self.__dict__[key]
def __deepcopy__(self, memo=None, _nil=[]): # pylint: disable=W0102
dd = dict(self)
return deepcopy(dd)
# Path: deepepochs/metrics.py
from functools import lru_cache
from .loops import sum_dicts, ddict
import torch
"""
@author: liuchen
"""
@lru_cache(maxsize=1)
def confusion_matrix(preds, targets, num_classes):
"""
Args:
preds: prediction vector; may be binary or a multi-class probability distribution
targets: label vector; may be one-hot or class indices
num_classes: number of classes
"""
if (preds.dim()==1 or preds.shape[-1]==1) and num_classes==2: # when predictions are binary
preds = preds.unsqueeze(-1) if preds.dim()==1 else preds
preds = torch.concat([1-preds, preds], dim=-1)
preds = preds.argmax(dim=-1).flatten().int()
if targets.dim() > 1 and targets.shape[-1] > 1: # when targets are one-hot
targets = targets.argmax(dim=1).int()
else:
targets = targets.flatten().int()
cm = torch.zeros([num_classes, num_classes], dtype=preds.dtype, device=preds.device)
one = torch.tensor([1], dtype=preds.dtype, device=preds.device)
return cm.index_put_((targets, preds), one, accumulate=True)
@lru_cache(maxsize=1)
def cmats_and_weights(c_mat):
"""获取各类别的混淆矩阵和权值"""
if c_mat.shape[0] == 2:
| c_mat = ddict({ |
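The confusion_matrix function above counts (target, prediction) pairs into a num_classes x num_classes grid with index_put_(..., accumulate=True). A tiny worked example of that accumulation (long dtypes are used here for simplicity):

import torch

preds   = torch.tensor([0, 1, 1, 0])   # predicted classes
targets = torch.tensor([0, 1, 0, 0])   # true classes
cm = torch.zeros(2, 2, dtype=torch.long)
one = torch.tensor([1], dtype=torch.long)
cm.index_put_((targets, preds), one, accumulate=True)
print(cm)
# tensor([[2, 1],
#         [0, 1]])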
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: colour-science/colour-visuals
# Path: colour_visuals/common.py
DEFAULT_FLOAT_DTYPE_WGPU = np.float32
# Path: colour_visuals/common.py
def unlatexify(text: str) -> str:
"""
Unlatexify given string.
Parameters
----------
text
String to remove the *LaTeX* character markup from.
Returns
-------
:class:`str`
Unlatexified string.
"""
return re.sub(r"[$^_{}]", "", text)
# Path: colour_visuals/visual.py
class MixinPropertyModel:
"""
Define a mixin for a colourspace model.
Attributes
----------
- :attr:`~colour_visuals.visual.MixinPropertyModel.model`
"""
def __init__(self):
self._model = "CIE xyY"
super().__init__()
@visual_property
def model(self) -> LiteralColourspaceModel | str:
"""
Getter and setter property for the colourspace model.
Parameters
----------
value
Value to set the colourspace model with.
Returns
-------
:class:`str`
Colourspace model.
"""
return self._model
@model.setter
def model(self, value: LiteralColourspaceModel | str):
"""Setter for the **self.model** property."""
self._model = validate_method(value, tuple(COLOURSPACE_MODELS))
# Path: colour_visuals/visual.py
class MixinPropertySize:
"""
Define a mixin for a size value.
Attributes
----------
- :attr:`~colour_visuals.visual.MixinPropertySize.size`
"""
def __init__(self):
self._size = 1
super().__init__()
@visual_property
def size(self) -> float:
"""
Getter and setter property for the size value.
Parameters
----------
value
Value to set size value with.
Returns
-------
:class:`int`
Size value.
"""
return self._size
@size.setter
def size(self, value: float):
"""Setter for the **self.size** property."""
self._size = value
# Path: colour_visuals/visual.py
class Visual(gfx.Group, metaclass=ABCMeta):
"""Define the base class for the visuals."""
def __init__(self):
self._is_update_blocked = False
super().__init__()
@contextmanager
def block_update(self) -> Generator:
"""Define a context manager that blocks the visual updates."""
self._is_update_blocked = True
yield
self._is_update_blocked = False
@abstractmethod
def update(self):
"""
Update the visual.
Notes
-----
- Must be reimplemented by sub-classes.
"""
# Path: colour_visuals/axes.py
import numpy as np
import pygfx as gfx
from colour.hints import LiteralColourspaceModel
from colour.models import COLOURSPACE_MODELS_AXIS_LABELS
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
colourspace_model_axis_reorder,
)
from colour.utilities import as_int_array
from colour_visuals.common import (
DEFAULT_FLOAT_DTYPE_WGPU,
unlatexify,
)
from colour_visuals.visual import (
MixinPropertyModel,
MixinPropertySize,
Visual,
)
# !/usr/bin/env python
"""
Axes Visuals
============
Defines the axes visuals:
- :class:`colour_visuals.VisualAxes`
"""
from __future__ import annotations
__author__ = "Colour Developers"
__copyright__ = "Copyright 2023 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = ["VisualAxes"]
| class VisualAxes(MixinPropertyModel, MixinPropertySize, Visual): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JiahuiLei/NAP
# Path: core/models/utils/misc.py
def cfg_with_default(cfg, key_list, default):
root = cfg
for k in key_list:
if k in root.keys():
root = root[k]
else:
return default
return root
# Path: object_utils/arti_graph_utils_v3.py
def compact_pack(V, E, K=25, permute=True):
if len(V) > K:
print(f"Warning, extend {K} to {len(V)}")
K = len(V)
num_v = len(V)
n_empty = K - num_v
# Nodes
v_mask = np.zeros(K, dtype=np.bool)
v_mask[: len(V)] = True
if permute:
# in the origin index, first num_v are object
v_map = np.random.permutation(K).tolist() # stores the original id
else:
v_map = np.arange(K).tolist()
v_mask = [v_mask[i] for i in v_map]
_v_bbox = [v["bbox_L"] for v in V] + [np.zeros(3)] * n_empty
v_bbox = [_v_bbox[i] for i in v_map]
v_bbox = torch.from_numpy(np.stack(v_bbox, axis=0)).float()
# p_global = T_gl @ p_local
_v_t_gl = [v["abs_center"] for v in V] + [np.zeros(3)] * n_empty
v_t_gl = [_v_t_gl[i] for i in v_map]
v_t_gl = torch.from_numpy(np.stack(v_t_gl, axis=0)).float()
# ! Now assume partnet-M all init part R = I
v_r_gl = torch.zeros(K, 3).float()
ret_v = torch.cat([torch.LongTensor(v_mask)[..., None], v_bbox, v_r_gl, v_t_gl], -1)
# Edges
total_edges = int(K * (K - 1) / 2) # include invalid
e_plucker = torch.zeros((total_edges, 6), dtype=torch.float32)
e_lim = torch.zeros((total_edges, 4), dtype=torch.float32)
e_type = torch.zeros((total_edges), dtype=torch.long) # [0,1,2] [empty, ij, ji]
for e in E:
# ! by default, the list of edges represent the upper triangle, i.e. row i, col j, then i < j
_src_ind, _dst_ind = e["e0"]["src_ind"], e["e0"]["dst_ind"]
src_ind, dst_ind = v_map.index(_src_ind), v_map.index(_dst_ind)
plucker = e["e0"]["plucker"]
# transform the plucker to global frame
_r_global = v_r_gl[src_ind]
_t_global = v_t_gl[src_ind]
plucker_global = torch.from_numpy(plucker.copy()).float()
_R_global = axis_angle_to_matrix(_r_global)
_lg = _R_global @ plucker_global[:3]
_mg = _R_global @ plucker_global[3:] + torch.cross(_t_global, _lg)
plucker_global = torch.cat([_lg, _mg], 0)
flip = plucker_need_flip(plucker_global)
if flip: # orient the global plucker to hemisphere
plucker_global = -plucker_global
if src_ind > dst_ind: # i = dst, j = src
i, j = dst_ind, src_ind
flip = not flip # when reverse the src and dst, the plucker should multiply by -1.0
elif src_ind < dst_ind:
i, j = src_ind, dst_ind
else:
raise ValueError("src_ind == dst_ind")
e_list_ind = map_upper_triangle_to_list(i, j, K)
if flip: # 2 is flip plucker
e_type[e_list_ind] = 2
else: # 1 is not flip plucker
e_type[e_list_ind] = 1
e_lim[e_list_ind, :2] = torch.Tensor(e["r_limits"])
e_lim[e_list_ind, 2:] = torch.Tensor(e["p_limits"])
# # debug
# print(e["r_limits"], e["p_limits"])
# assert e["r_limits"][0] <= e["r_limits"][1]
# assert e["p_limits"][0] <= e["p_limits"][1]
e_plucker[e_list_ind] = plucker_global
e_type = F.one_hot(e_type, num_classes=3).float()
ret_e = torch.cat([e_type, e_plucker, e_lim], dim=1)
# v: [mask_occ(1), bbox(3), r_gl(3), t_gl(3) | additional codes in the future]
# e: [type(3), plucker(6), rlim(2), plim(2)]
return ret_v, ret_e, v_map
# Path: object_utils/arti_graph_utils_v3.py
def map_upper_triangle_to_list(i, j, K):
assert i < j, "not upper triangle"
e_list_ind = i * (2 * K - i - 1) // 2 + j - i - 1
return e_list_ind
# Path: dataset/partnet_m_grouping.py
from random import random
from torch.utils.data import Dataset
from os.path import join
from core.models.utils.misc import cfg_with_default
from tqdm import tqdm
from object_utils.arti_graph_utils_v3 import compact_pack, map_upper_triangle_to_list
from copy import deepcopy
from torch.utils.data import WeightedRandomSampler
import logging
import json
import os
import os.path as osp
import numpy as np
import torch
import json
# Load processed PartNet-Mobility graph
# v5: unlike v4, use a fully random node permutation instead of keeping the valid nodes in the first slots of v_mask
class Dataset(Dataset):
def __init__(self, cfg, mode) -> None:
super().__init__()
d_cfg = cfg["dataset"]
self.mode = mode.lower()
self.dataset_proportion = d_cfg["dataset_proportion"][cfg["modes"].index(self.mode)]
self.data_root = join(cfg["root"], d_cfg["data_root"])
self.pad_nv = d_cfg["max_K"]
self.pad_np = d_cfg["max_P"]
self.n_pcl = d_cfg["n_pcl"]
self.valid_obj_ind = self.load_split(
d_cfg["split_path"], phase=self.mode, cates=d_cfg["cates"]
)
| self.balance_flag = cfg_with_default(d_cfg, ["balance_flag"], False) |
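map_upper_triangle_to_list, imported above, flattens an upper-triangular edge index (i, j) with i < j into a position in a list of K*(K-1)/2 entries. A quick sanity check for K = 4, where the six pairs taken in row-major order should map to 0..5:

def map_upper_triangle_to_list(i, j, K):
    assert i < j, "not upper triangle"
    return i * (2 * K - i - 1) // 2 + j - i - 1

K = 4
pairs = [(i, j) for i in range(K) for j in range(i + 1, K)]
print([map_upper_triangle_to_list(i, j, K) for i, j in pairs])  # [0, 1, 2, 3, 4, 5]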
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yongliang-wu/ExploreCfg
# Path: open_flamingo/src/helpers.py
class GatedCrossAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
ff_mult=4,
only_attend_immediate_media=True,
):
super().__init__()
self.attn = MaskedCrossAttention(
dim=dim,
dim_visual=dim_visual,
dim_head=dim_head,
heads=heads,
only_attend_immediate_media=only_attend_immediate_media,
)
self.attn_gate = nn.Parameter(torch.tensor([0.0]))
self.ff = FeedForward(dim, mult=ff_mult)
self.ff_gate = nn.Parameter(torch.tensor([0.0]))
def forward(
self,
x,
media,
media_locations=None,
attend_previous=True,
):
x = (
self.attn(
x,
media,
media_locations=media_locations,
attend_previous=attend_previous,
)
* self.attn_gate.tanh()
+ x
)
x = self.ff(x) * self.ff_gate.tanh() + x
return x
# Path: open_flamingo/src/utils.py
def getattr_recursive(obj, att):
"""
Return nested attribute of obj
Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
"""
if att == "":
return obj
i = att.find(".")
if i < 0:
return getattr(obj, att)
else:
return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])
# Path: open_flamingo/src/utils.py
def setattr_recursive(obj, att, val):
"""
Set nested attribute of obj
Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
"""
if "." in att:
obj = getattr_recursive(obj, ".".join(att.split(".")[:-1]))
setattr(obj, att.split(".")[-1], val)
# Path: open_flamingo/src/flamingo_lm.py
import random
import torch.nn as nn
from .helpers import GatedCrossAttentionBlock
from .utils import getattr_recursive, setattr_recursive
class FlamingoLayer(nn.Module):
def __init__(self, gated_cross_attn_layer, decoder_layer):
super().__init__()
self.gated_cross_attn_layer = gated_cross_attn_layer
self.decoder_layer = decoder_layer
self.vis_x = None
self.media_locations = None
def is_conditioned(self) -> bool:
"""Check whether the layer is conditioned."""
return self.vis_x is not None
# Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
def condition_vis_x(self, vis_x):
self.vis_x = vis_x
def condition_media_locations(self, media_locations):
self.media_locations = media_locations
def condition_attend_previous(self, attend_previous):
self.attend_previous = attend_previous
def forward(
self,
lang_x,
attention_mask=None,
**decoder_layer_kwargs,
):
if self.gated_cross_attn_layer is None:
return self.decoder_layer(
lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
)
if self.vis_x is None:
raise ValueError("vis_x must be conditioned before forward pass")
if self.media_locations is None:
raise ValueError("media_locations must be conditioned before forward pass")
lang_x = self.gated_cross_attn_layer(
lang_x,
self.vis_x,
media_locations=self.media_locations,
attend_previous=self.attend_previous,
)
lang_x = self.decoder_layer(
lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
)
return lang_x
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self):
return getattr_recursive(self, self.decoder_layers_attr_name)
def _set_decoder_layers(self, value):
setattr_recursive(self, self.decoder_layers_attr_name, value)
def init_flamingo(
self,
media_token_id,
vis_hidden_size,
cross_attn_every_n_layers,
use_media_placement_augmentation,
):
"""
Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.
"""
self.gated_cross_attn_layers = nn.ModuleList(
[
| GatedCrossAttentionBlock( |
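The attn_gate and ff_gate parameters of GatedCrossAttentionBlock above are initialised to zero, so tanh(gate) is 0 and each newly inserted layer starts out as an identity mapping around the frozen language model. A minimal check of that property (the branch tensor merely stands in for the cross-attention output):

import torch

gate = torch.nn.Parameter(torch.tensor([0.0]))
x = torch.randn(2, 4)
branch = torch.randn(2, 4)          # stand-in for attn(...) or ff(...)
out = branch * gate.tanh() + x      # mirrors: attn(...) * attn_gate.tanh() + x
print(torch.allclose(out, x))       # True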
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mimo-x/Code-Review-GPT-Gitlab
# Path: utils/logger.py
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(CURRENT_PATH, os.pardir)
LOG_PATH = os.path.join(parent_dir, 'logs')
class LogHandler(logging.Logger):
def __init__(self, name, level=INFO, stream=True, file=True):
def __setFileHandler__(self, level=None):
def __setStreamHandler__(self, level=None):
def resetName(self, name):
# Path: utils/dingding.py
@message_error_handler
def send_dingtalk_message_by_sign(message_text):
"""
Send a message notification to a DingTalk group using the signed-webhook method.
Args:
webhook_url (str): webhook URL of the DingTalk group chat bot
secret (str): secret key from the bot's security settings
message_text (str): text content of the message
Returns:
bool: whether the message was sent successfully
"""
timestamp = str(round(time.time() * 1000))
sign = get_sign(timestamp)
webhookurl = f"{dingding_bot_webhook}×tamp={timestamp}&sign={sign}"
# Build the request headers
headers = {
"Content-Type": "application/json",
}
# Build the request body
message = {
"msgtype": "text",
"text": {
"content": message_text
},
"timestamp": timestamp,
"sign": sign
}
# Send the HTTP POST request
response = requests.post(
webhookurl,
headers=headers,
data=json.dumps(message)
)
# Check the response
if response.status_code == 200:
print("Message sent successfully.")
return True
else:
print("消息发送失败,HTTP状态码:", response.status_code)
return False
# Path: app/gitlab_utils.py
import requests
from retrying import retry
from config.config import *
from utils.logger import log
from utils.dingding import send_dingtalk_message_by_sign
@retry(stop_max_attempt_number=3, wait_fixed=2000)
def get_merge_request_id(branch_name, project_id):
"""
Get the merge request ID from a branch name.
:param branch_name: branch name
:param project_id: project ID
:return: the MR ID if an open MR exists for the branch, otherwise ""
"""
# Build the API request URL
url = f"{gitlab_server_url}/api/v4/projects/{project_id}/merge_requests"
# Send the API request to check whether any merge request is associated with the branch
params = {
"source_branch": branch_name,
"state": "opened" # 可以根据需求选择合适的状态(opened、closed、merged等)
}
headers = {"Private-Token": gitlab_private_token}
response = requests.get(url, params=params, headers=headers)
# Parse the JSON response and check whether there is a related merge request
if response.status_code == 200:
merge_requests = response.json()
if len(merge_requests) > 0:
| log.info(f"分支 '{branch_name}' 存在mr记录.{merge_requests}") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AI-Application-and-Integration-Lab/DGUA_FAS
# Path: util/dataset.py
class YunpeiDataset(Dataset):
def __init__(self, data_pd, transforms=None, train=True):
self.train = train
self.photo_path = data_pd['photo_path'].tolist()
self.photo_label = data_pd['photo_label'].tolist()
self.photo_belong_to_video_ID = data_pd['photo_belong_to_video_ID'].tolist()
if transforms is None:
if not train:
self.transforms = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
else:
self.transforms = T.Compose([
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
else:
self.transforms = transforms
def __len__(self):
return len(self.photo_path)
def __getitem__(self, item):
if self.train:
img_path = self.photo_path[item]
label = self.photo_label[item]
img = Image.open(img_path).resize((256, 256))
img = self.transforms(img)
return img, label
else:
img_path = self.photo_path[item]
label = self.photo_label[item]
videoID = self.photo_belong_to_video_ID[item]
img = Image.open(img_path).resize((256, 256))
img = self.transforms(img)
return img, label, videoID
# Path: util/utils.py
def sample_frames(flag, num_frames, dataset_name):
'''
Sample num_frames frames from every video for testing.
return: the chosen frames' paths and labels
'''
# The process is a little cumbersome; you can change it to your own approach for convenience
root_path = '../../data_label/' + dataset_name
if(flag == 0): # select the fake images
label_path = root_path + '/fake_label.json'
save_label_path = root_path + '/choose_fake_label.json'
elif(flag == 1): # select the real images
label_path = root_path + '/real_label.json'
save_label_path = root_path + '/choose_real_label.json'
else: # select all the real and fake images
label_path = root_path + '/all_label.json'
save_label_path = root_path + '/choose_all_label.json'
all_label_json = json.load(open(label_path, 'r'))
f_sample = open(save_label_path, 'w')
length = len(all_label_json)
# three componets: frame_prefix, frame_num, png
saved_frame_prefix = '/'.join(all_label_json[0]['photo_path'].split('/')[:-1])
final_json = []
video_number = 0
single_video_frame_list = []
single_video_frame_num = 0
single_video_label = 0
for i in range(length):
photo_path = all_label_json[i]['photo_path']
photo_label = all_label_json[i]['photo_label']
frame_prefix = '/'.join(photo_path.split('/')[:-1])
# the last frame
if (i == length - 1):
photo_frame = int(photo_path.split('/')[-1].split('.')[0])
single_video_frame_list.append(photo_frame)
single_video_frame_num += 1
single_video_label = photo_label
# a new video, so process the saved one
if (frame_prefix != saved_frame_prefix or i == length - 1):
# [1, 2, 3, 4,.....]
single_video_frame_list.sort()
frame_interval = math.floor(single_video_frame_num / num_frames)
for j in range(num_frames):
dict = {}
# dict['photo_path'] = saved_frame_prefix + '/' + str(
# single_video_frame_list[6 + j * frame_interval]) + '.png'
if dataset_name not in {'cefa', 'wmca_train', 'wmca_test'}:
dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[ j * frame_interval]):03d}' + '.png'
elif dataset_name == 'cefa':
print(single_video_frame_list)
print(saved_frame_prefix)
dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[6 + j * frame_interval]):04d}' + '.jpg'
else:
dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[j * frame_interval]):03d}' + '.jpg'
dict['photo_label'] = single_video_label
dict['photo_belong_to_video_ID'] = video_number
final_json.append(dict)
video_number += 1
saved_frame_prefix = frame_prefix
single_video_frame_list.clear()
single_video_frame_num = 0
# get every frame information
photo_frame = int(photo_path.split('/')[-1].split('.')[0])
single_video_frame_list.append(photo_frame)
single_video_frame_num += 1
single_video_label = photo_label
if(flag == 0):
print("Total video number(fake): ", video_number, dataset_name)
elif(flag == 1):
print("Total video number(real): ", video_number, dataset_name)
else:
print("Total video number(target): ", video_number, dataset_name)
json.dump(final_json, f_sample, indent=4)
f_sample.close()
f_json = open(save_label_path)
sample_data_pd = pd.read_json(f_json)
return sample_data_pd
# Path: util/get_loader.py
import os
import random
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from util.dataset import YunpeiDataset
from util.utils import sample_frames
def get_dataset(src1_data, src1_train_num_frames, src2_data, src2_train_num_frames, src3_data, src3_train_num_frames,
tgt1_data, tgt_test_num_frames, batch_size):
print('Load Source Data')
print('Source Data: ', src1_data)
| src1_train_data_fake = sample_frames(flag=0, num_frames=src1_train_num_frames, dataset_name=src1_data) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jianlanluo/SAQ
# Path: vqn/jax_utils.py
def next_rng(*args, **kwargs):
global jax_utils_rng
return jax_utils_rng(*args, **kwargs)
# Path: vqn/jax_utils.py
def value_and_multi_grad(fun, n_outputs, argnums=0, has_aux=False):
def select_output(index):
def wrapped(*args, **kwargs):
if has_aux:
x, *aux = fun(*args, **kwargs)
return (x[index], *aux)
else:
x = fun(*args, **kwargs)
return x[index]
return wrapped
grad_fns = tuple(
jax.value_and_grad(select_output(i), argnums=argnums, has_aux=has_aux)
for i in range(n_outputs)
)
def multi_grad_fn(*args, **kwargs):
grads = []
values = []
for grad_fn in grad_fns:
(value, *aux), grad = grad_fn(*args, **kwargs)
values.append(value)
grads.append(grad)
return (tuple(values), *aux), tuple(grads)
return multi_grad_fn
# Path: vqn/jax_utils.py
def mse_loss(val, target):
return jnp.mean(jnp.square(val - target))
# Path: vqn/jax_utils.py
class JaxRNG(object):
""" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside
pure function.
"""
@classmethod
def from_seed(cls, seed):
return cls(jax.random.PRNGKey(seed))
def __init__(self, rng):
self.rng = rng
def __call__(self, keys=None):
if keys is None:
self.rng, split_rng = jax.random.split(self.rng)
return split_rng
elif isinstance(keys, int):
split_rngs = jax.random.split(self.rng, num=keys + 1)
self.rng = split_rngs[0]
return tuple(split_rngs[1:])
else:
split_rngs = jax.random.split(self.rng, num=len(keys) + 1)
self.rng = split_rngs[0]
return {key: val for key, val in zip(keys, split_rngs[1:])}
# Path: vqn/jax_utils.py
def wrap_function_with_rng(rng):
""" To be used as decorator, automatically bookkeep a RNG for the wrapped function. """
def wrap_function(function):
def wrapped(*args, **kwargs):
nonlocal rng
rng, split_rng = jax.random.split(rng)
return function(split_rng, *args, **kwargs)
return wrapped
return wrap_function
# Path: vqn/jax_utils.py
def collect_jax_metrics(metrics, names, prefix=None):
collected = {}
for name in names:
if name in metrics:
collected[name] = jnp.mean(metrics[name])
if prefix is not None:
collected = {
'{}/{}'.format(prefix, key): value for key, value in collected.items()
}
return collected
# Path: vqn/model.py
class Scalar(nn.Module):
init_value: float
def setup(self):
self.value = self.param('value', lambda x:self.init_value)
def __call__(self):
return self.value
# Path: vqn/model.py
def update_target_network(main_params, target_params, tau):
return jax.tree_util.tree_map(
lambda x, y: tau * x + (1.0 - tau) * y,
main_params, target_params
)
# Path: vqn/utils.py
def prefix_metrics(metrics, prefix):
return {
'{}/{}'.format(prefix, key): value for key, value in metrics.items()
}
# Path: vqn/conservative_sac.py
from collections import OrderedDict
from copy import deepcopy
from functools import partial
from ml_collections import ConfigDict
from flax.training.train_state import TrainState
from .jax_utils import (
next_rng, value_and_multi_grad, mse_loss, JaxRNG, wrap_function_with_rng,
collect_jax_metrics
)
from .model import Scalar, update_target_network
from .utils import prefix_metrics
import numpy as np
import jax
import jax.numpy as jnp
import flax
import flax.linen as nn
import optax
import distrax
class ConservativeSAC(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.discount = 0.99
config.alpha_multiplier = 0.0
config.use_automatic_entropy_tuning = False
config.backup_entropy = False
config.target_entropy = 0.0
config.policy_lr = 3e-4
config.policy_weight_decay = 0.0
config.qf_lr = 3e-4
config.qf_weight_decay = 0.0
config.optimizer_type = 'adam'
config.soft_target_update_rate = 5e-3
config.use_cql = False
config.cql_n_actions = 10
config.cql_importance_sample = True
config.cql_lagrange = False
config.cql_target_action_gap = 1.0
config.cql_temp = 1.0
config.cql_min_q_weight = 5.0
config.cql_max_target_backup = False
config.cql_clip_diff_min = -np.inf
config.cql_clip_diff_max = np.inf
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, policy, qf):
self.config = self.get_default_config(config)
self.policy = policy
self.qf = qf
self.observation_dim = policy.observation_dim
self.action_dim = policy.action_dim
self._train_states = {}
optimizer_class = {
'adam': optax.adam,
'sgd': optax.sgd,
}[self.config.optimizer_type]
policy_params = self.policy.init(
| next_rng(self.policy.rng_keys()), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dpaleka/llm-chess-proofgame
# Path: puzzle_solver.py
def convert_pgn_to_game(pgn_moves):
pgn = io.StringIO(pgn_moves)
game = chess.pgn.read_game(pgn)
if len(game.errors) > 0:
return None
return game
# Path: puzzle_solver.py
def solve_puzzle(board, solution, engine):
solution = solution.split()
while True:
guess_next_move = engine.get_best_move(board)
real_next_move, *solution = solution
if guess_next_move != real_next_move:
try:
board.push_san(guess_next_move)
if board.is_checkmate():
return True
except:
pass
return False
board.push_san(guess_next_move)
if len(solution) > 0:
opponent_move, *solution = solution
board.push_san(opponent_move)
else:
break
return True
# Path: puzzle_pair_solve.py
import chess
import numpy as np
import io
import json
import csv
import chessllm
from pathlib import Path
from tqdm import tqdm
from puzzle_solver import convert_pgn_to_game, solve_puzzle
from matplotlib import pyplot as plt
DATA_DIR = Path("/data/chess-data/lichess_puzzles")
FILE_NAME = DATA_DIR / "pairs.csv"
"""
Solve puzzle pairs given in FILE_NAME, and report whether the model can solve them.
Separate by rating buckets; take 40 samples from each bucket.
It has the following columns: uid,rating,pgn,proofgame,solution
Helper functions:
def solve_puzzle(board, solution) -> bool: whether model can solve the puzzle
convert_pgn_to_game(pgn_moves) -> game
"""
DATA_DIR = Path("/data/chess-data/lichess_puzzles")
FILE_NAME = DATA_DIR / "pairs.csv"
def plot_acc_pairs(engine, bucket_size=200, enough_samples=10):
# Create buckets
buckets = {i*bucket_size: [] for i in range(30)}
# Read the data and sort into buckets
with open(FILE_NAME) as f:
reader = csv.reader(f)
print(reader.__next__())
for uid, rating, pgn, proofgame, solution in tqdm(list(reader)):
rating_bucket = int(rating) // bucket_size * bucket_size
if len(buckets[rating_bucket]) < enough_samples:
buckets[rating_bucket].append((pgn, proofgame, solution))
# print how many elems in buckets
for k, v in buckets.items():
print(f'rating [{k}, {k + bucket_size})', 'n', len(v))
nonempty_buckets = [k for k, v in buckets.items() if len(v) > 0]
# Test the puzzles
ok_pgn = {i*bucket_size: [] for i in range(30)}
ok_proofgame = {i*bucket_size: [] for i in range(30)}
for rating_bucket, puzzles in tqdm(buckets.items()):
for pgn, proofgame, solution in puzzles:
board_pgn = chess.Board()
board_proofgame = chess.Board()
print("pgn origi", pgn)
print("proofgame", proofgame)
# Iterate over the moves and apply them to the board
for move in convert_pgn_to_game(pgn).mainline_moves():
board_pgn.push(move)
for move in convert_pgn_to_game(proofgame).mainline_moves():
board_proofgame.push(move)
| is_right_pgn = solve_puzzle(board_pgn, solution, engine) |
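solve_puzzle above accepts a model move that deviates from the reference solution only if it delivers an immediate checkmate, relying on python-chess's push_san and is_checkmate. A minimal sketch of that pattern, independent of the Lichess puzzle data (fool's mate):

import chess

board = chess.Board()
for san in ["f3", "e5", "g4", "Qh4"]:   # fool's mate
    board.push_san(san)
print(board.is_checkmate())  # True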
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Azure/azure-openai-benchmark
# Path: benchmark/tokenizecmd.py
def tokenize(args):
"""
Count number of tokens for given input and model. It attempts to decode
input as json chat messages. Otherwise, it assumes input is just text.
Return: number of tokens.
"""
model = args.model
text = args.text
if text is None:
logging.info("no input text given, reading starding in")
text = sys.stdin.read()
count = 0
try:
data = json.loads(text)
count = num_tokens_from_messages(data, model)
except json.JSONDecodeError:
logging.info("input does not seem to be json formatted, assuming text")
count = num_tokens_from_text(text, model)
print(f"tokens: {count}")
# Path: benchmark/loadcmd.py
def load(args):
try:
_validate(args)
except ValueError as e:
print(f"invalid argument(s): {e}")
sys.exit(1)
api_key = os.getenv(args.api_key_env)
url = args.api_base_endpoint[0] + "/openai/deployments/" + args.deployment + "/chat/completions"
url += "?api-version=" + args.api_version
rate_limiter = NoRateLimiter()
if args.rate is not None and args.rate > 0:
rate_limiter = RateLimiter(args.rate, 60)
max_tokens = args.max_tokens
context_tokens = args.context_tokens
if args.shape_profile == "balanced":
context_tokens = 500
max_tokens = 500
elif args.shape_profile == "context":
context_tokens = 2000
max_tokens = 200
elif args.shape_profile == "generation":
context_tokens = 500
max_tokens = 1000
logging.info(f"using shape profile {args.shape_profile}: context tokens: {context_tokens}, max tokens: {max_tokens}")
request_builder = _RequestBuilder("gpt-4-0613", context_tokens,
max_tokens=max_tokens,
completions=args.completions,
frequence_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
temperature=args.temperature,
top_p=args.top_p)
logging.info("starting load...")
_run_load(request_builder,
max_concurrency=args.clients,
api_key=api_key,
url=url,
rate_limiter=rate_limiter,
backoff=args.retry=="exponential",
request_count=args.requests,
duration=args.duration,
aggregation_duration=args.aggregation_window,
json_output=args.output_format=="jsonl")
# Path: benchmark/bench.py
import argparse
import logging
from .tokenizecmd import tokenize
from .loadcmd import load
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
parser = argparse.ArgumentParser(description="Benchmarking tool for Azure OpenAI Provisioned Throughput Units (PTUs).")
sub_parsers = parser.add_subparsers()
load_parser = sub_parsers.add_parser("load", help="Run load generation tool.")
load_parser.add_argument("-a", "--api-version", type=str, default="2023-05-15", help="Set OpenAI API version.")
load_parser.add_argument("-k", "--api-key-env", type=str, default="OPENAI_API_KEY", help="Environment variable that contains the API KEY.")
load_parser.add_argument("-c", "--clients", type=int, default=20, help="Set number of parallel clients to use for load generation.")
load_parser.add_argument("-n", "--requests", type=int, help="Number of requests for the load run. Default to 'until killed'.")
load_parser.add_argument("-d", "--duration", type=int, help="Duration of load in seconds. Defaults to 'until killed'.")
load_parser.add_argument("-r", "--rate", type=float, help="Rate of request generation in Requests Per Minute (RPM). Default to as fast as possible.")
load_parser.add_argument("-w", "--aggregation-window", type=float, default=60, help="Statistics aggregation sliding window duration in seconds. See README.md for more details.")
load_parser.add_argument("-s", "--shape-profile", type=str, default="balanced", help="Shape profile of requests.", choices=["balanced", "context", "generation", "custom"])
load_parser.add_argument("-p", "--context-tokens", type=int, help="Number of context tokens to use when --shape-profile=custom.")
load_parser.add_argument("-m", "--max-tokens", type=int, help="Number of requested max_tokens when --shape-profile=custom. Defaults to unset.")
load_parser.add_argument("-i", "--completions", type=int, default=1, help="Number of completion for each request.")
load_parser.add_argument("--frequency-penalty", type=float, help="Request frequency_penalty.")
load_parser.add_argument("--presence-penalty", type=float, help="Request frequency_penalty.")
load_parser.add_argument("--temperature", type=float, help="Request temperature.")
load_parser.add_argument("--top-p", type=float, help="Request top_p.")
load_parser.add_argument("-f", "--output-format", type=str, default="human", help="Output format.", choices=["jsonl", "human"])
load_parser.add_argument("-t", "--retry", type=str, default="none", help="Request retry strategy.", choices=["none", "exponential"])
load_parser.add_argument("-e", "--deployment", type=str, help="Azure OpenAI deployment name.", required=True)
load_parser.add_argument("api_base_endpoint", help="Azure OpenAI deployment base endpoint.", nargs=1)
| load_parser.set_defaults(func=load) |
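bench.py above wires each sub-command to its handler via set_defaults(func=...), so after parsing the chosen handler can be invoked uniformly as args.func(args). A toy sketch of that argparse dispatch pattern (illustrative command, not the repository's CLI):

import argparse

def hello(args):
    print(f"hello {args.name}")

parser = argparse.ArgumentParser()
sub = parser.add_subparsers()
p = sub.add_parser("hello")
p.add_argument("name")
p.set_defaults(func=hello)

args = parser.parse_args(["hello", "world"])
args.func(args)  # prints: hello world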
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pytest-visual/pytest-visual
# Path: visual/lib/convenience.py
def ceil_division(n, d):
return (n + d - 1) // d
# Path: visual/lib/convenience.py
def correct_layout(image: np.ndarray, layout: str) -> np.ndarray:
if layout[0] == "1":
image = np.squeeze(image, axis=0)
layout = layout[1:]
if layout[0] == "c":
image = np.moveaxis(image, 0, -1)
layout = layout[1:] + "c"
return image
# Path: visual/lib/convenience.py
def get_grid_shape(num_images: int, max_cols: int) -> Tuple[int, int]:
"""
Calculate the shape of the grid of images to show.
"""
rows = ceil_division(num_images, max_cols)
cols = ceil_division(num_images, rows)
return rows, cols
# Path: visual/lib/convenience.py
def get_image_max_value_from_type(max_value: Optional[float], image: np.ndarray) -> float:
"""
Get or calculate the maximum value of the image.
"""
if max_value is not None:
return max_value
if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64]:
return 255.0
if image.dtype in [np.float16, np.float32, np.float64]:
return 1.0
raise ValueError(f"Could not determine max value from image with dtype {image.dtype}")
# Path: visual/lib/convenience.py
def get_layout_from_image(layout: Optional[str], image: np.ndarray) -> str:
"""
Get or calculate the layout of the grid of images to show.
Possible values: "hwc", "chw", "hw", "1chw", "1hwc"
"""
if layout is not None:
return layout
matched_layouts = [L for L in ["hwc", "chw", "hw", "1chw", "1hwc"] if layout_matches_image(L, image)]
assert len(matched_layouts) == 1, f"Could not determine layout from image with shape {image.shape}"
return matched_layouts[0]
# Path: tests/lib/test_convenience.py
import numpy as np
from visual.lib.convenience import (
ceil_division,
correct_layout,
get_grid_shape,
get_image_max_value_from_type,
get_layout_from_image,
)
def test_get_grid_shape():
assert get_grid_shape(1, 3) == (1, 1)
assert get_grid_shape(2, 3) == (1, 2)
assert get_grid_shape(3, 3) == (1, 3)
assert get_grid_shape(4, 3) == (2, 2)
assert get_grid_shape(5, 3) == (2, 3)
assert get_grid_shape(6, 3) == (2, 3)
assert get_grid_shape(7, 3) == (3, 3)
assert get_grid_shape(10, 3) == (4, 3)
def test_ceil_division():
assert ceil_division(19, 10) == 2
assert ceil_division(20, 10) == 2
assert ceil_division(21, 10) == 3
def test_get_layout_from_image():
| assert get_layout_from_image("hwc", np.zeros((1, 1, 1))) == "hwc" |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SLDGroup/G-CASCADE
# Path: lib/gcn_lib/torch_nn.py
class BasicConv(Seq):
def __init__(self, channels, act='relu', norm=None, bias=True, drop=0., kernel_size=1, padding=0, groups=4):
m = []
for i in range(1, len(channels)):
m.append(Conv2d(channels[i - 1], channels[i], kernel_size, padding=padding, bias=bias, groups=groups))
if norm is not None and norm.lower() != 'none':
m.append(norm_layer(norm, channels[-1]))
if act is not None and act.lower() != 'none':
m.append(act_layer(act))
if drop > 0:
m.append(nn.Dropout2d(drop))
super(BasicConv, self).__init__(*m)
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# Path: lib/gcn_lib/torch_nn.py
def batched_index_select(x, idx):
r"""fetches neighbors features from a given neighbor idx
Args:
x (Tensor): input feature Tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times C \times N \times 1}`.
idx (Tensor): edge_idx
:math:`\mathbf{X} \in \mathbb{R}^{B \times N \times l}`.
Returns:
Tensor: output neighbors features
:math:`\mathbf{X} \in \mathbb{R}^{B \times C \times N \times k}`.
"""
batch_size, num_dims, num_vertices_reduced = x.shape[:3]
_, num_vertices, k = idx.shape
#print([batch_size,num_dims,num_vertices_reduced, num_vertices, k, x.shape])
idx_base = torch.arange(0, batch_size, device=idx.device).view(-1, 1, 1) * num_vertices_reduced
#print(idx_base.shape)
idx = idx + idx_base
idx = idx.contiguous().view(-1)
#print(x.shape)
x = x.transpose(2, 1)
#print(x.shape)
x = x.contiguous().view(batch_size * num_vertices_reduced, -1)
#print(x.shape)
feature = x[idx, :]
#print(feature.shape)
feature = feature.view(batch_size, num_vertices, k, num_dims)
#print(feature.shape)
feature = feature.permute(0, 3, 1, 2).contiguous()
#print(feature.shape)
return feature
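# A minimal usage sketch for batched_index_select above (shapes are illustrative
# assumptions, not taken from the repository): gathering k=3 neighbor features
# for B=2 graphs with C=4 channels and N=6 vertices.
import torch
x_demo = torch.randn(2, 4, 6, 1)             # B x C x N x 1 vertex features
idx_demo = torch.randint(0, 6, (2, 6, 3))    # B x N x k neighbor indices
neighbors_demo = batched_index_select(x_demo, idx_demo)
assert neighbors_demo.shape == (2, 4, 6, 3)  # B x C x N x k gathered features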
# Path: lib/gcn_lib/torch_nn.py
def act_layer(act, inplace=False, neg_slope=0.2, n_prelu=1):
# activation layer
act = act.lower()
if act == 'relu':
layer = nn.ReLU(inplace)
elif act == 'leakyrelu':
layer = nn.LeakyReLU(neg_slope, inplace)
elif act == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
elif act == 'gelu':
layer = nn.GELU()
elif act == 'hswish':
layer = nn.Hardswish(inplace)
else:
raise NotImplementedError('activation layer [%s] is not found' % act)
return layer
# Path: lib/gcn_lib/torch_edge.py
class DenseDilatedKnnGraph(nn.Module):
"""
Find the neighbors' indices based on dilated knn
"""
def __init__(self, k=9, dilation=1, stochastic=False, epsilon=0.0):
super(DenseDilatedKnnGraph, self).__init__()
self.dilation = dilation
self.stochastic = stochastic
self.epsilon = epsilon
self.k = k
self._dilated = DenseDilated(k, dilation, stochastic, epsilon)
def forward(self, x, y=None, relative_pos=None):
if y is not None:
#### normalize
x = F.normalize(x, p=2.0, dim=1)
y = F.normalize(y, p=2.0, dim=1)
####
edge_index = xy_dense_knn_matrix(x, y, self.k * self.dilation, relative_pos)
else:
#### normalize
x = F.normalize(x, p=2.0, dim=1)
####
edge_index = dense_knn_matrix(x, self.k * self.dilation, relative_pos)
return self._dilated(edge_index)
# Path: lib/gcn_lib/pos_embed.py
def get_2d_relative_pos_embed(embed_dim, grid_size):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, grid_size*grid_size]
"""
pos_embed = get_2d_sincos_pos_embed(embed_dim, grid_size)
relative_pos = 2 * np.matmul(pos_embed, pos_embed.transpose()) / pos_embed.shape[1]
return relative_pos
# Path: lib/gcn_lib/torch_vertex.py
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .torch_nn import BasicConv, batched_index_select, act_layer
from .torch_edge import DenseDilatedKnnGraph
from .pos_embed import get_2d_relative_pos_embed
from timm.models.layers import DropPath
# 2022.06.17-Changed for building ViG model
# Huawei Technologies Co., Ltd. <foss@huawei.com>
class MRConv2d(nn.Module):
"""
Max-Relative Graph Convolution (Paper: https://arxiv.org/abs/1904.03751) for dense data type
"""
def __init__(self, in_channels, out_channels, act='relu', norm=None, bias=True, kernel_size=1, padding=0, groups=4):
super(MRConv2d, self).__init__()
| self.nn = BasicConv([in_channels*2, out_channels], act, norm, bias, kernel_size=1, padding=0, groups=4) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: StackTipsLab/bloggy
# Path: bloggy/models.py
# Path: bloggy/models/comment.py
class Comment(models.Model):
post = models.ForeignKey('bloggy.Post', on_delete=models.CASCADE, related_name='comments')
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='comments', blank=True,
null=True)
parent = models.ForeignKey('self', related_name='reply_set', null=True, on_delete=models.PROTECT)
comment_content = models.TextField()
comment_author_name = models.TextField(null=True, blank=True)
comment_author_email = models.TextField(null=True, blank=True)
comment_author_url = models.TextField(null=True, blank=True)
comment_author_ip = models.GenericIPAddressField(default="0.0.0.0", null=True, blank=True)
comment_date = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=False)
class Meta:
ordering = ['comment_date']
verbose_name = "Comment"
verbose_name_plural = "Comments"
def __str__(self):
return 'Comment {} by {}'.format(self.comment_content, self.user.get_full_name() if self.user else '-')
def get_comments(self):
return Comment.objects.filter(parent=self).filter(active=True)
# Path: bloggy/models/course.py
class Course(Content):
difficulty = models.CharField(
max_length=20, choices=[
('beginner', 'Beginner'),
('intermediate', 'Intermediate'),
('advance', 'advance'),
],
default='easy', blank=True, null=True,
help_text="Select difficulty",
verbose_name="Difficulty level")
is_featured = models.BooleanField(
default=False,
help_text="Should this story be featured on site?"
)
description = models.TextField(null=True, help_text='Enter answer')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='courses')
thumbnail = models.ImageField(upload_to=upload_thumbnail_image, null=True, blank=True)
category = models.ForeignKey(Category, blank=True, on_delete=models.CASCADE, related_name='courses')
view_count = GenericRelation(HitCount, object_id_field='object_pk', related_query_name='hit_count_generic_relation')
class Meta:
ordering = ['-display_order']
verbose_name = "course"
verbose_name_plural = "courses"
indexes = [
models.Index(fields=['slug', 'publish_status', 'published_date']),
]
def get_absolute_url(self):
return reverse("courses_single", kwargs={"slug": str(self.slug)})
@property
def get_lessons(self):
return self.post_set.filter(publish_status="LIVE").order_by("display_order").all()
def thumbnail_tag(self):
if self.thumbnail:
return format_html(f'<img src="{self.thumbnail.url}" width="auto" height="40"/>')
return ""
thumbnail_tag.short_description = 'Logo'
thumbnail_tag.allow_tags = True
# Path: bloggy/models/quizzes.py
class Quiz(Content):
difficulty = models.CharField(
max_length=20,
choices=[
('beginner', 'Beginner'),
('intermediate', 'Intermediate'),
('advance', 'advance'),
],
default='easy', blank=True, null=True,
help_text="Select difficulty",
verbose_name="Difficulty level")
is_featured = models.BooleanField(
default=False,
help_text="Should this story be featured on site?"
)
content = TextField(
null=True,
help_text='Post content'
)
thumbnail = models.ImageField(
upload_to=upload_thumbnail_image,
null=True,
blank=True)
category = models.ForeignKey(
Category,
blank=True,
on_delete=models.CASCADE,
related_name='quizzes'
)
duration = models.IntegerField(
help_text="Duration in minutes. For articles, it will be calculated automatically.",
default="1"
)
view_count = GenericRelation(
HitCount,
object_id_field='object_pk',
related_query_name='hit_count_generic_relation'
)
@property
def get_questions_json(self):
return get_questions_json(self)
def get_questions(self):
return self.quizquestion_set.all()
class Meta:
ordering = ['title']
verbose_name = "Quiz"
verbose_name_plural = "Quizzes"
indexes = [
models.Index(fields=['slug', 'publish_status']),
]
# Path: bloggy_api/serializers.py
from rest_framework import serializers
from bloggy.models import Post, User, Category
from bloggy.models.comment import Comment
from bloggy.models.course import Course
from bloggy.models.quizzes import Quiz
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = [
'id',
'title',
'article_count',
'slug',
'description',
'color',
'logo',
'publish_status',
'created_date',
'updated_date',
]
class AuthorSerializer(serializers.ModelSerializer):
full_name = serializers.SerializerMethodField('get_full_name')
class Meta:
model = User
fields = (
'name',
'username',
'profile_photo',
'website',
'twitter',
'youtube',
'github',
'bio',
)
class UserSerializer(serializers.ModelSerializer):
name = serializers.CharField()
email = serializers.EmailField()
profile_photo = serializers.ImageField()
website = serializers.CharField()
twitter = serializers.CharField()
youtube = serializers.CharField()
github = serializers.CharField()
bio = serializers.CharField()
class Meta:
model = User
fields = [
'name',
'email',
'username',
'profile_photo',
'website',
'twitter',
'youtube',
'github',
'bio',
]
class CourseSerializer(serializers.ModelSerializer):
class Meta:
| model = Course |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: openvinotoolkit/openvino.genai
# Path: llm_bench/python/utils/nncf_utils.py
COMPRESSION_OPTIONS = {
"INT8": {"mode": nncf.CompressWeightsMode.INT8 if "INT8_ASYM" not in nncf.CompressWeightsMode.__members__ else nncf.CompressWeightsMode.INT8_ASYM},
"INT4_SYM": {
"mode": nncf.CompressWeightsMode.INT4_SYM,
"group_size": 128,
},
"INT4_ASYM": {
"mode": nncf.CompressWeightsMode.INT4_ASYM,
"group_size": 128,
},
}
# Path: llm_bench/python/utils/nncf_utils.py
INT4_MODEL_CONFIGURATION = {
"dolly-v2-3b": {"mode": nncf.CompressWeightsMode.INT4_ASYM, "group_size": 32, "ratio": 0.5},
"gpt-j-6b": {"mode": nncf.CompressWeightsMode.INT4_ASYM, "group_size": 64},
"opt-6.7b": {"mode": nncf.CompressWeightsMode.INT4_ASYM, "group_size": 64, "ratio": 0.8},
"bloomz-7b1": {"mode": nncf.CompressWeightsMode.INT4_ASYM, "group_size": 32, "ratio": 0.6},
"red-pajama-incite-7b-instruct": {"mode": nncf.CompressWeightsMode.INT4_ASYM, "group_size": 128},
"zephyr-7b-beta": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 64, "ratio": 0.6},
"llama-2-7b": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 128, "ratio": 0.6},
"llama-2-7b-chat": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 128, "ratio": 0.8},
"llama-2-13b-chat": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 64, "ratio": 0.8},
"stablelm-3b-4e1t": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 64, "ratio": 0.8},
"stablelm-epoch-3b-preview": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 64, "ratio": 0.8},
"stable-zephyr-3b-dpo": {"mode": nncf.CompressWeightsMode.INT4_ASYM, "group_size": 64, "ratio": 0.8},
"rocket-3b": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 128, "ratio": 0.8},
"chatglm2-6b": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 128, "ratio": 0.72},
"qwen-7b-chat": {"mode": nncf.CompressWeightsMode.INT4_SYM, "group_size": 128, "ratio": 0.6},
}
# Path: llm_bench/python/utils/conversion_utils/helpers.py
from enum import Enum
from pathlib import Path
from nncf import compress_weights
from openvino import save_model
from ..nncf_utils import COMPRESSION_OPTIONS, INT4_MODEL_CONFIGURATION
from optimum.gptq import GPTQQuantizer
from auto_gptq import exllama_set_max_input_length
from optimum.gptq import GPTQQuantizer
import logging as log
import torch
import warnings
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
class BackendType(Enum):
PYTORCH = 'pytorch'
OPENVINO = 'openvino'
PYTORCH_DIR = 'pytorch'
PYTORCH_COMPRESS_WEIGHTS_DIR = 'compressed_weights/PT_{precision}-{compression}'
OV_DIR = 'dldt'
GPTQ_DIR = "GPTQ_INT4-{precision}"
def is_torch_compression(args):
return args.compress_weights and BackendType.PYTORCH.value in args.compress_weights_backends
def is_ov_compression(args):
return args.compress_weights and BackendType.OPENVINO.value in args.compress_weights_backends
def is_fp16(args):
return args.precision == "FP16"
def is_ov_model_provided(model_id, model_dir, precision, model_name="openvino_model.xml"):
model_dirs = []
if Path(model_id).is_dir():
model_dirs.append(Path(model_id))
model_dirs.append(Path(model_id) / precision)
model_dirs.append(Path(model_id) / OV_DIR / precision)
model_dirs.append(Path(model_id) / PYTORCH_DIR / OV_DIR / precision)
model_dir = Path(model_dir)
model_dirs.append(model_dir)
model_dirs.append(model_dir / precision)
model_dirs.append(model_dir / OV_DIR / precision)
model_dirs.append(model_dir / PYTORCH_DIR / OV_DIR / precision)
for md in model_dirs:
found = True
for suffix in ['.xml', '.bin']:
model_file = (md / model_name).with_suffix(suffix)
if not model_file.exists():
found = False
break
if found:
return found
return False
def get_fp_path(args, model_subpath):
model_dirs = []
if Path(args.model_id).is_dir():
base_model_dir = Path(args.model_id)
model_dirs.extend([
base_model_dir, base_model_dir / args.precision, base_model_dir / OV_DIR / args.precision, base_model_dir / PYTORCH_DIR / OV_DIR / args.precision
])
model_dir = Path(args.output_dir)
model_dirs.append(model_dir)
model_dirs.append(Path(model_dir) / args.precision)
model_dirs.append(Path(model_dir) / OV_DIR / args.precision)
model_dirs.append(Path(model_dir) / PYTORCH_DIR / OV_DIR / args.precision)
for md in model_dirs:
if (md / model_subpath).exists():
return md / model_subpath
return None
def save_tokenizer(tokenizer, out_dir):
try:
tokenizer.save_pretrained(out_dir)
except Exception as e:
log.error(f'tokenizer loading failed with {e}')
def compress_ov_model_weights_helper(ov_model, tok, config, out_path, compress_weights_format="INT8", fp16=False, args={}, model_name="openvino_model"):
compression_args = None
if "INT8" in compress_weights_format and "INT8_ASYM" in COMPRESSION_OPTIONS:
warnings.warn("Usage INT8 mode is deprecated and will be removed soon. Please use INT8_ASYM instead", DeprecationWarning)
if "4BIT_DEFAULT" in compress_weights_format:
model_id = out_path.parents[3].name
| if model_id in INT4_MODEL_CONFIGURATION: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Iniquitatis/sd-webui-temporal
# Path: temporal/fs.py
def ensure_directory_exists(path):
if not path.is_dir():
path.mkdir(parents = True)
return path
# Path: temporal/fs.py
def load_json(path, fallback = None):
if not path.is_file():
return fallback
with open_utf8(path, "r") as file:
return json.load(file)
# Path: temporal/fs.py
def save_json(path, data):
with open_utf8(path, "w") as file:
json.dump(data, file, indent = 4)
# Path: temporal/image_utils.py
def ensure_image_dims(im, mode, size):
if is_np := isinstance(im, np.ndarray):
im = Image.fromarray(skimage.util.img_as_ubyte(im))
if im.mode != mode:
im = im.convert(mode)
if im.size != size:
im = im.resize(size, Image.Resampling.LANCZOS)
return skimage.util.img_as_float(im) if is_np else im
# Path: temporal/image_utils.py
def np_to_pil(npim):
return Image.fromarray(skimage.util.img_as_ubyte(npim))
# Path: temporal/image_utils.py
def pil_to_np(im):
return skimage.util.img_as_float(im)
# Path: temporal/numpy_utils.py
def average_array(arr, axis, trim = 0.0, power = 1.0, weights = None):
if trim == 0.5:
return np.median(arr, axis)
elif trim > 0.0:
arr = stats.trimboth(arr, trim, axis)
weights = None
if weights is not None:
weights = match_array_dimensions(weights, arr, axis)
if power != 1.0:
arr = arr + 1.0
if power == -1.0:
result = stats.hmean(arr, axis = axis, weights = weights)
elif power == 0.0:
result = stats.gmean(arr, axis = axis, weights = weights)
elif power == 1.0:
result = np.average(arr, axis, weights)
elif power == 2.0:
result = np.sqrt(np.average(np.square(arr), axis, weights))
elif power == 3.0:
result = np.cbrt(np.average(np.power(arr, 3.0), axis, weights))
else:
result = stats.pmean(arr, power, axis = axis, weights = weights)
if power != 1.0:
result -= 1.0
return result
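# A small numeric sketch of the power-mean behaviour described above (written
# directly with numpy; the values are illustrative assumptions): power selects
# a generalized mean of (arr + 1), shifted back by 1, except power=1.0 which is
# a plain (optionally weighted) arithmetic mean.
import numpy as np
demo = np.array([0.2, 0.4, 0.8])
arithmetic = np.mean(demo)                                 # power = 1.0
quadratic = np.sqrt(np.mean(np.square(demo + 1.0))) - 1.0  # power = 2.0
geometric = np.exp(np.mean(np.log(demo + 1.0))) - 1.0      # power = 0.0
assert geometric < arithmetic < quadratic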
# Path: temporal/numpy_utils.py
def make_eased_weight_array(count, easing):
return (np.linspace(1, count, count, dtype = np.float_) / count) ** easing
# Path: temporal/serialization.py
def load_object(obj, data, data_dir, existing_only = True):
for key, value in data.items():
if not existing_only or hasattr(obj, key):
setattr(obj, key, _load_value(value, data_dir))
# Path: temporal/serialization.py
def save_object(obj, data_dir, filter = None):
return {k: _save_value(v, data_dir) for k, v in vars(obj).items() if not filter or k in filter}
# Path: temporal/image_buffer.py
import numpy as np
from temporal.fs import ensure_directory_exists, load_json, save_json
from temporal.image_utils import ensure_image_dims, np_to_pil, pil_to_np
from temporal.numpy_utils import average_array, make_eased_weight_array
from temporal.serialization import load_object, save_object
class ImageBuffer:
def __init__(self, width, height, channels, count):
self.array = np.zeros((count, height, width, channels))
self.last_index = 0
@property
def width(self):
return self.array.shape[2]
@property
def height(self):
return self.array.shape[1]
@property
def channels(self):
return self.array.shape[3]
@property
def count(self):
return self.array.shape[0]
def init(self, im):
npim = self._convert_image_to_np(im)
for i in range(self.count):
self.array[i] = npim
def add(self, im):
self.array[self.last_index] = self._convert_image_to_np(im)
self.last_index += 1
self.last_index %= self.count
def average(self, trimming = 0.0, easing = 0.0, preference = 0.0):
return np_to_pil(self.array[0] if self.count == 1 else np.clip(average_array(
self.array,
axis = 0,
trim = trimming,
power = preference + 1.0,
weights = np.roll(make_eased_weight_array(self.count, easing), self.last_index),
), 0.0, 1.0))
def load(self, project_dir):
buffer_dir = project_dir / "session" / "buffer"
if data := load_json(buffer_dir / "data.json"):
load_object(self, data, buffer_dir)
def save(self, project_dir):
buffer_dir = ensure_directory_exists(project_dir / "session" / "buffer")
save_json(buffer_dir / "data.json", save_object(self, buffer_dir))
def _convert_image_to_np(self, im):
| return pil_to_np(ensure_image_dims( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zabbix/python-zabbix-utils
# Path: zabbix_utils/logger.py
class EmptyHandler(logging.Handler):
"""Empty logging handler."""
def emit(self, *args, **kwargs):
pass
# Path: zabbix_utils/common.py
class ZabbixProtocol():
ZABBIX_PROTOCOL = b'ZBXD'
HEADER_SIZE = 13
@classmethod
def __prepare_request(cls, data: Union[bytes, str, list, dict]) -> bytes:
if isinstance(data, bytes):
return data
if isinstance(data, str):
return data.encode("utf-8")
if isinstance(data, list) or isinstance(data, dict):
return json.dumps(data, ensure_ascii=False).encode("utf-8")
raise TypeError("Unsupported data type, only 'bytes', 'str', 'list' or 'dict' is expected")
@classmethod
def create_packet(cls, payload: Union[bytes, str, list, dict],
log: Logger, compression: bool = False) -> bytes:
"""Create a packet for sending via the Zabbix protocol.
Args:
payload (Union[bytes, str, list, dict]): Payload of the future packet
log (Logger): Logger object
compression (bool, optional): Compression use flag. Defaults to `False`.
Returns:
bytes: Generated Zabbix protocol packet
"""
request = cls.__prepare_request(payload)
log.debug('Request data: %s', shorten(request.decode("utf-8"), 200, placeholder='...'))
# 0x01 - Zabbix communications protocol
flags = 0x01
datalen = len(request)
reserved = 0
if compression:
# 0x02 - Using packet compression mode
flags |= 0x02
reserved = datalen
request = zlib.compress(request)
datalen = len(request)
header = struct.pack('<4sBII', cls.ZABBIX_PROTOCOL, flags, datalen, reserved)
packet = header + request
log.debug('Content of the packet: %s', shorten(str(packet), 200, placeholder='...'))
return packet
@classmethod
def receive_packet(cls, conn: socket, size: int, log: Logger) -> bytes:
"""Receive a Zabbix protocol packet.
Args:
conn (socket): Opened socket connection
size (int): Expected packet size
log (Logger): Logger object
Returns:
bytes: Received packet content
"""
buf = b''
while len(buf) < size:
chunk = conn.recv(size - len(buf))
if not chunk:
log.debug("Socket connection was closed before receiving expected amount of data.")
break
buf += chunk
return buf
@classmethod
def parse_packet(cls, conn: socket, log: Logger, exception) -> str:
"""Parse a received Zabbix protocol packet.
Args:
conn (socket): Opened socket connection
log (Logger): Logger object
exception: Exception type
Raises:
exception: Depends on input exception type
Returns:
str: Body of the received packet
"""
response_header = cls.receive_packet(conn, cls.HEADER_SIZE, log)
log.debug('Zabbix response header: %s', response_header)
if (not response_header.startswith(cls.ZABBIX_PROTOCOL) or
len(response_header) != cls.HEADER_SIZE):
log.debug('Unexpected response was received from Zabbix.')
raise exception('Unexpected response was received from Zabbix.')
flags, datalen, reserved = struct.unpack('<BII', response_header[4:])
# 0x01 - Zabbix communications protocol
if not flags & 0x01:
raise exception(
'Unexpected flags were received. '
'Check debug log for more information.'
)
# 0x04 - Using large packet mode
if flags & 0x04:
raise exception(
'A large packet flag was received. '
'Current module doesn\'t support large packets.'
)
# 0x02 - Using packet compression mode
if flags & 0x02:
response_body = zlib.decompress(cls.receive_packet(conn, datalen, log))
else:
response_body = cls.receive_packet(conn, datalen, log)
return response_body.decode("utf-8")
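# A minimal sketch of the wire format handled by create_packet/parse_packet
# above (the payload content is an illustrative assumption): 4-byte protocol
# id, 1 flag byte, little-endian 4-byte data length, 4-byte reserved field,
# followed by the JSON body.
import json, struct
body = json.dumps({"request": "sender data"}).encode("utf-8")
packet = struct.pack('<4sBII', b'ZBXD', 0x01, len(body), 0) + body
proto, flags, datalen, reserved = struct.unpack('<4sBII', packet[:13])
assert proto == b'ZBXD' and flags & 0x01 and datalen == len(body)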
# Path: zabbix_utils/exceptions.py
class ProcessingError(ModuleBaseException):
def __init__(self, *args):
super().__init__(" ".join(map(str, args)))
return
# Path: zabbix_utils/sender.py
import re
import json
import socket
import logging
import configparser
from decimal import Decimal
from typing import Callable, Union
from typing import Self # type: ignore
from typing_extensions import Self
from .logger import EmptyHandler
from .common import ZabbixProtocol
from .exceptions import ProcessingError
# zabbix_utils
#
# Copyright (C) 2001-2023 Zabbix SIA
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For Python less 3.11 compatibility
try:
except ImportError:
log = logging.getLogger(__name__)
| log.addHandler(EmptyHandler()) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: miccunifi/TAPE
# Path: models/losses.py
class CharbonnierLoss(nn.Module):
"""
Charbonnier loss (one variant of Robust L1Loss, a differentiable variant of L1Loss).
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate Super-Resolution".
Args:
eps (float): A value used to control the curvature near zero. Default: 1e-12.
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
"""
def __init__(self, eps: float = 1e-12, loss_weight: float = 1.0):
super(CharbonnierLoss, self).__init__()
self.loss_weight = loss_weight
self.eps = eps
def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
"""
return self.loss_weight * charbonnier_loss(pred, target, eps=self.eps)
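# The helper charbonnier_loss is not shown in this excerpt; a common
# formulation (a sketch under that assumption, not necessarily the repository's
# exact implementation) is the mean of sqrt((pred - target)^2 + eps).
import torch
def charbonnier_loss_sketch(pred, target, eps=1e-12):
    return torch.sqrt((pred - target) ** 2 + eps).mean()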
# Path: models/losses.py
class PerceptualLoss(nn.Module):
"""
VGG 19 Perceptual loss
Args:
layer_weights (dict): Layer weights for perceptual loss.
use_input_norm (bool): If True, x: [0, 1] --> (x - mean) / std. Default: True
use_range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. Default: False.
criterion (str): Criterion type. Default: 'l2'.
loss_weight (float): Loss weight for perceptual loss. Default: 1.0.
"""
def __init__(self, layer_weights: dict, use_input_norm: bool = True, use_range_norm: bool = False,
criterion: str = 'l2', loss_weight: float = 1.0):
super(PerceptualLoss, self).__init__()
self.layer_weights = layer_weights
self.vgg = VGGFeatureExtractor(layer_name_list=list(layer_weights.keys()),
use_input_norm=use_input_norm,
use_range_norm=use_range_norm)
self.criterion_type = criterion
if self.criterion_type == 'l1':
self.criterion = torch.nn.L1Loss()
elif self.criterion_type == 'l2':
self.criterion = torch.nn.MSELoss()
else:
raise NotImplementedError(f'{criterion} criterion is not supported.')
self.loss_weight = loss_weight
def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Forward function.
Args:
pred (Tensor): Input tensor with shape (n, c, h, w).
target (Tensor): Ground-truth tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
pred_feat = self.vgg(pred)
target_feat = self.vgg(target.detach())
loss = 0.0
for i in pred_feat.keys():
loss += self.criterion(pred_feat[i], target_feat[i]) * self.layer_weights[i]
loss *= self.loss_weight
return loss
# Path: models/pl_model_module.py
import torch
import torch.nn as nn
import pytorch_lightning as pl
import torchmetrics.image
import torchmetrics
import os.path as osp
from torchvision.transforms.functional import to_pil_image
from torchmetrics.functional.image.ssim import structural_similarity_index_measure
from einops import rearrange
from models.losses import CharbonnierLoss, PerceptualLoss
class ModelModule(pl.LightningModule):
"""
Pytorch Lightning Module for model training.
Args:
net (nn.Module): Model to train
num_input_frames (int): Number of input frames in the input window
pixel_loss_weight (float): Weight of the pixel loss
perceptual_loss_weight (float): Weight of the perceptual loss
lr (float): Learning rate
"""
def __init__(self, net: nn.Module, num_input_frames: int = 5, pixel_loss_weight: float = 200,
perceptual_loss_weight: float = 1, lr: float = 2e-5):
super(ModelModule, self).__init__()
self.save_hyperparameters(ignore=["net"])
self.net = net
self.num_input_frames = num_input_frames
self.pixel_loss_weight = pixel_loss_weight
self.perceptual_loss_weight = perceptual_loss_weight
self.lr = lr
| self.pixel_criterion = CharbonnierLoss(loss_weight=self.pixel_loss_weight) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: YefanZhou/TempBalance
# Path: object_detection/src/YOLOv8/ultralytics/vit/sam/build.py
def build_sam(ckpt='sam_b.pt'):
"""Build a SAM model specified by ckpt."""
model_builder = None
for k in sam_model_map.keys():
if ckpt.endswith(k):
model_builder = sam_model_map.get(k)
if not model_builder:
raise FileNotFoundError(f'{ckpt} is not a supported sam model. Available models are: \n {sam_model_map.keys()}')
return model_builder(ckpt)
# Path: object_detection/src/YOLOv8/ultralytics/vit/sam/predict.py
class Predictor(BasePredictor):
def preprocess(self, im):
"""Prepares input image for inference."""
# TODO: Only support bs=1 for now
# im = ResizeLongestSide(1024).apply_image(im[0])
# im = torch.as_tensor(im, device=self.device)
# im = im.permute(2, 0, 1).contiguous()[None, :, :, :]
return im[0]
def setup_model(self, model):
"""Set up YOLO model with specified thresholds and device."""
device = select_device(self.args.device)
model.eval()
self.model = SamAutomaticMaskGenerator(model.to(device),
pred_iou_thresh=self.args.conf,
box_nms_thresh=self.args.iou)
self.device = device
# TODO: Temporary settings for compatibility
self.model.pt = False
self.model.triton = False
self.model.stride = 32
self.model.fp16 = False
self.done_warmup = True
def postprocess(self, preds, path, orig_imgs):
"""Postprocesses inference output predictions to create detection masks for objects."""
names = dict(enumerate(list(range(len(preds)))))
results = []
# TODO
for i, pred in enumerate([preds]):
masks = torch.from_numpy(np.stack([p['segmentation'] for p in pred], axis=0))
orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
path = self.batch[0]
img_path = path[i] if isinstance(path, list) else path
results.append(Results(orig_img=orig_img, path=img_path, names=names, masks=masks))
return results
# def __call__(self, source=None, model=None, stream=False):
# frame = cv2.imread(source)
# preds = self.model.generate(frame)
# return self.postprocess(preds, source, frame)
# Path: object_detection/src/YOLOv8/ultralytics/vit/sam/model.py
from ultralytics.yolo.cfg import get_cfg
from .build import build_sam
from .predict import Predictor
# SAM model interface
class SAM:
def __init__(self, model='sam_b.pt') -> None:
if model and not model.endswith('.pt') and not model.endswith('.pth'):
# Should raise AssertionError instead?
raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint')
| self.model = build_sam(model) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: intuit/sac3
# Path: sac3/paraphraser.py
def paraphrase(question, number, model, temperature):
# Path: sac3/evaluator.py
class Evaluate:
def __init__(self, model):
self.model = model
self.prompt_temp = 'Answer the following question:\n'
def self_evaluate(self, self_question, temperature, self_num):
'''
Inputs:
self_question - original user query
temperature - [0,1] for LLM randomness
self_num - how many generated responses given this question
Outputs:
self_responses - generated responses given this question with different temperatures
'''
self_responses = []
prompt = self.prompt_temp + '\nQ:' + self_question
for i in range(self_num):
# llm model: GPTs, open-source models (falcon, guanaco)
if self.model in ['gpt-3.5-turbo','gpt-4']:
res = llm_models.call_openai_model(prompt, self.model, temperature) # openai model call
elif self.model == 'guanaco-33b':
res = llm_models.call_guanaco_33b(prompt, max_new_tokens = 200)
elif self.model == 'falcon-7b':
res = llm_models.call_falcon_7b(prompt, max_new_tokens = 200)
# other open-sourced llms
self_responses.append(res)
return self_responses
def perb_evaluate(self, perb_questions, temperature):
'''
Inputs:
perb_questions - perturbed questions that are semantically equivalent to the original question
temperature - [0,1] for LLM randomness
Outputs:
perb_responses - generated responses given the perturbed questions
'''
perb_responses = []
for i in range(len(perb_questions)):
prompt = self.prompt_temp + '\nQ:' + perb_questions[i]
# llm model: GPTs, open-source models (falcon, guanaco)
if self.model in ['gpt-3.5-turbo','gpt-4']:
res = llm_models.call_openai_model(prompt, self.model, temperature) # openai model call
elif self.model == 'guanaco-33b':
res = llm_models.call_guanaco_33b(prompt, max_new_tokens = 200)
elif self.model == 'falcon-7b':
res = llm_models.call_falcon_7b(prompt, max_new_tokens = 200)
# other open-sourced llms
perb_responses.append(res)
return perb_responses
# Path: sac3/consistency_checker.py
class SemanticConsistnecyCheck:
def __init__(self, model):
self.model = model
self.prompt_temp = """
Are the following two Question-Answer(QA) pairs semantically equivalent?
Provide your best guess and the probability that it is correct (0.0 to 1.0).
Given ONLY the guess (Yes or No) and probability, no other words or explanation.
For example:
Guess: <most likely guess, as short as possible; not a complete sentence, just the guess!>
Probability: <the probability between 0.0 and 1.0 that your guess is correct, without any extra commentary whatsoever;
just the probability!>
"""
def score_scc(self, question, target_answer, candidate_answers, temperature):
'''
Inputs:
question - original user query
target_answer - generated response given the original question (temp=0) if not provided by user
candidate_answers - generated responses given the question (original + perturbed)
temperature - [0,1] for LLM randomness
Outputs:
score - inconsistency score (hallucination metric)
sc_output - specific score for each candidate answers compared with the target answer
'''
if target_answer is None:
raise ValueError("Target answer cannot be None. ")
sc_output = []
target_pair = 'Q:' + question + '\nA:' + target_answer
num_candidate_answer = len(candidate_answers)
for i in range(num_candidate_answer):
candidate_pair = 'Q:' + question + '\nA:' + candidate_answers[i]
prompt = self.prompt_temp + '\nThe first QA pair is:\n' + target_pair + '\nThe second QA pair is:\n' + candidate_pair
res = llm_models.call_openai_model(prompt, self.model, temperature) # openai model call
guess = res.split(':')[1].split('\n')[0].strip()
# print(res, guess)
value = 0 if guess == 'Yes' else 1
# print('value',value)
sc_output.append(value)
score = sum(sc_output)/num_candidate_answer
return score, sc_output
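# A small numeric sketch of the score computed above (values are illustrative
# assumptions): each candidate judged not semantically equivalent to the target
# contributes 1, so the inconsistency score is the fraction of "No" verdicts.
sc_output_demo = [0, 1, 1, 0]                      # 0 = "Yes", 1 = "No"
score_demo = sum(sc_output_demo) / len(sc_output_demo)
assert score_demo == 0.5                           # higher -> more hallucination risk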
# Path: sac3/main.py
from sac3 import paraphraser
from sac3.evaluator import Evaluate
from sac3.consistency_checker import SemanticConsistnecyCheck
# input information
question = 'Was there ever a US senator that represented the state of Alabama and whose alma mater was MIT?'
target_answer = 'Never'
# question pertubation
gen_question = paraphraser.paraphrase(question, number = 3, model = 'gpt-3.5-turbo', temperature=1.0)
# llm evaluation
| llm_evaluate = Evaluate(model='gpt-3.5-turbo') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zcczhang/UVD
# Path: uvd/utils/array_tensor_utils.py
def any_stack(xs: List, *, dim: int = 0):
"""Works for both torch Tensor and numpy array."""
def _any_stack_helper(*xs):
x = xs[0]
if isinstance(x, np.ndarray):
return np.stack(xs, axis=dim)
elif torch.is_tensor(x):
return torch.stack(xs, dim=dim)
elif isinstance(x, float):
# special treatment for float, defaults to float32
return np.array(xs, dtype=np.float32)
else:
return np.array(xs)
return tree.map_structure(_any_stack_helper, *xs)
# Path: uvd/utils/array_tensor_utils.py
def any_to_torch_tensor(
x,
dtype: Union[str, torch.dtype, None] = None,
device: Union[str, int, torch.device, None] = None,
copy=False,
non_blocking=False,
smart_optimize: bool = True,
):
dtype = torch_dtype(dtype)
device = torch_device(device)
if not isinstance(x, (torch.Tensor, np.ndarray)):
# x is a primitive python sequence
x = torch.tensor(x, dtype=dtype)
copy = False
# This step does not create any copy.
# If x is a numpy array, simply wraps it in Tensor. If it's already a Tensor, do nothing.
x = torch.as_tensor(x)
# avoid passing None to .to(), PyTorch 1.4 bug
dtype = dtype or x.dtype
device = device or x.device
if not smart_optimize:
# do a single stage type conversion and transfer
return x.to(dtype=dtype, device=device, copy=copy, non_blocking=non_blocking)
# we have two choices: (1) convert dtype and then transfer to GPU
# (2) transfer to GPU and then convert dtype
# because CPU-to-GPU memory transfer is the bottleneck, we will reduce it as
# much as possible by sending the smaller dtype
src_dtype_size = torch_dtype_size(x.dtype)
# destination dtype size
if dtype is None:
dest_dtype_size = src_dtype_size
else:
dest_dtype_size = torch_dtype_size(dtype)
if x.dtype != dtype or x.device != device:
# a copy will always be performed, no need to force copy again
copy = False
if src_dtype_size > dest_dtype_size:
# better to do conversion on one device (e.g. CPU) and then transfer to another
return _convert_then_transfer(x, dtype, device, copy, non_blocking)
elif src_dtype_size == dest_dtype_size:
# when equal, we prefer to do the conversion on whichever device that's GPU
if x.device.type == "cuda":
return _convert_then_transfer(x, dtype, device, copy, non_blocking)
else:
return _transfer_then_convert(x, dtype, device, copy, non_blocking)
else:
# better to transfer data across device first, and then do conversion
return _transfer_then_convert(x, dtype, device, copy, non_blocking)
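# An illustrative note on the smart_optimize branch above: the dtype conversion
# and the device transfer are ordered so that the smaller dtype crosses the
# CPU<->GPU boundary. For example (the device string is an assumption and the
# call is left commented out so the sketch runs without a GPU):
import numpy as np
big = np.zeros((1024, 1024), dtype=np.float64)
# t = any_to_torch_tensor(big, dtype='float16', device='cuda:0')
# would convert to float16 on CPU first, cutting the transferred bytes by ~4x.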
# Path: uvd/utils/array_tensor_utils.py
def any_to_numpy(
x,
dtype: Union[str, np.dtype, None] = None,
copy: bool = False,
non_blocking: bool = False,
smart_optimize: bool = True,
exclude_none: bool = False,
):
if exclude_none and x is None:
return x
if isinstance(x, torch.Tensor):
x = any_to_torch_tensor(
x,
dtype=dtype,
device="cpu",
copy=copy,
non_blocking=non_blocking,
smart_optimize=smart_optimize,
)
return x.detach().numpy()
else:
# primitive python sequence or ndarray
return np.array(x, dtype=dtype, copy=copy)
# Path: uvd/utils/file_utils.py
def f_mkdir(*fpaths):
"""Recursively creates all the subdirs If exist, do nothing."""
fpath = f_join(*fpaths)
os.makedirs(fpath, exist_ok=True)
return fpath
# Path: uvd/utils/file_utils.py
def f_join(*fpaths):
"""Join file paths and expand special symbols like `~` for home dir."""
return f_expand(os.path.join(*fpaths))
# Path: uvd/utils/file_utils.py
def f_remove(fpath, verbose=False, dry_run=False):
"""If exist, remove.
Supports both dir and file. Supports glob wildcard.
"""
assert isinstance(verbose, bool)
fpath = f_expand(fpath)
if dry_run:
print("Dry run, delete:", fpath)
return
for f in glob.glob(fpath):
try:
shutil.rmtree(f)
except OSError as e:
if e.errno == errno.ENOTDIR:
try:
os.remove(f)
except: # final resort safeguard
pass
if verbose:
print(f'Deleted "{fpath}"')
# Path: uvd/utils/video_utils.py
import subprocess
import numpy as np
import torch
import torchvision.io
import ffmpeg # pip install ffmpeg-python
from typing import Union, List, Optional
from .array_tensor_utils import any_stack, any_to_torch_tensor, any_to_numpy
from .file_utils import f_mkdir, f_join, f_remove
from einops import rearrange
from einops import rearrange
__all__ = ["save_video", "ffmpeg_save_video", "compress_video", "VideoTensorWriter"]
def save_video(
video: Union[np.ndarray, torch.Tensor],
fname: str,
fps: Optional[int] = None,
compress: bool = False,
):
fname = f_join(fname)
| video = any_to_torch_tensor(video) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Aatrox103/SAP
# Path: utils/openai_api.py
@backoff.on_exception(backoff.expo, (openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.Timeout))
def askChatGPT(prompt, model_name):
if model_name == "gpt-3.5-turbo-0301":
messages = [{"role": "user", "content": prompt}]
# print(messages[0]['content'])
response = openai.ChatCompletion.create(model=model_name, messages=messages, temperature=1)
return response['choices'][0]['message']['content']
elif model_name == "text-davinci-003":
completions = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=2048,
)
message = completions.choices[0].text
return message
# Path: utils/openai_api.py
def GPT_eval_score(text, model_name, template_eval):
content = askChatGPT(template_eval.format(text), model_name)
match = re.search(r'(\d+)', content)
if match is None:
return None
else:
score = match.group(1)
return int(score)
# Path: utils/openai_api.py
@backoff.on_exception(backoff.expo, (openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.Timeout))
def Moderation(input):
response = openai.Moderation.create(model="text-moderation-stable", input=input)
return response
# Path: gpt_eval.py
import openai
import json
import re
import os
import backoff
import logging
import fire
from tqdm import tqdm
from utils.openai_api import askChatGPT, GPT_eval_score, Moderation
#data_path: name of the grandparent directory that the evaluated file is read from and that the results are stored under
#gpt_output: list of generated texts to be evaluated
#eval_filename_prefix: file-name prefix of the input file, also used to name the files that store the results
#Uses GPT to evaluate the harmfulness of the generated results;
#two result files are written to different directories alongside the evaluated file.
def GPT_eval(data_path, gpt_output, model_name, eval_filename_prefix):
with open('./templates/eval_template2.json', 'r', encoding="utf8") as file:
template_eval = json.load(file)[0]
gpt_eval_score = []
save_dir = f"./data/{data_path}/"
if eval_filename_prefix is None and os.path.exists(f'{save_dir}gpt_output_score.json'):
with open(f'{save_dir}gpt_output_score.json', 'r', encoding="utf8") as file:
gpt_eval_score = json.load(file)
elif os.path.exists(f'{save_dir}{eval_filename_prefix}/{eval_filename_prefix}_output_score.json'):
with open(f'{save_dir}{eval_filename_prefix}/{eval_filename_prefix}_output_score.json', 'r', encoding="utf8") as file:
gpt_eval_score = json.load(file)
if len(gpt_eval_score) == len(gpt_output):
write_log=False
else:
write_log=True
gpt_output= gpt_output[len(gpt_eval_score):]
fail_index = []
for index, output in enumerate(tqdm(gpt_output)):
fail_times = 0
while True:
| score = GPT_eval_score(text=output, template_eval=template_eval, model_name=model_name) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bytedance/ColTrack
# Path: util/get_param_dicts.py
def match_name_keywords(n: str, name_keywords: list):
out = False
for b in name_keywords:
if b in n:
out = True
break
return out
# Path: util/get_param_dicts.py
def get_param_dict(args, model_without_ddp: nn.Module):
try:
param_dict_type = args.param_dict_type
except:
param_dict_type = 'default'
assert param_dict_type in ['default', 'ddetr_in_mmdet', 'large_wd']
# by default
if param_dict_type == 'default':
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
}
]
return param_dicts
if param_dict_type == 'ddetr_in_mmdet':
param_dicts = [
{
"params":
[p for n, p in model_without_ddp.named_parameters()
if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
"lr": args.lr,
},
{
"params": [p for n, p in model_without_ddp.named_parameters()
if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],
"lr": args.lr_backbone,
},
{
"params": [p for n, p in model_without_ddp.named_parameters()
if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
"lr": args.lr * args.lr_linear_proj_mult,
}
]
return param_dicts
if param_dict_type == 'large_wd':
param_dicts = [
{
"params":
[p for n, p in model_without_ddp.named_parameters()
if not match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],
},
{
"params": [p for n, p in model_without_ddp.named_parameters()
if match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],
"lr": args.lr_backbone,
"weight_decay": 0.0,
},
{
"params": [p for n, p in model_without_ddp.named_parameters()
if match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],
"lr": args.lr_backbone,
"weight_decay": args.weight_decay,
},
{
"params":
[p for n, p in model_without_ddp.named_parameters()
if not match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],
"lr": args.lr,
"weight_decay": 0.0,
}
]
# print("param_dicts: {}".format(param_dicts))
return param_dicts
# Path: motlib/utils/model.py
import json
import torch
import torch.nn as nn
from util.get_param_dicts import match_name_keywords
from util.get_param_dicts import get_param_dict as get_param_dict_default
__all__ = ['get_param_dict']
def get_param_dict(args, model_without_ddp: nn.Module):
try:
param_dict_type = args.param_dict_type
except:
param_dict_type = 'default'
assert param_dict_type in ['default', 'ddetr_in_mmdet', 'large_wd', 'finetune']
if param_dict_type == 'finetune':
ft_ignore_param = args.frozen_weights_mot
param_dicts = [
{
"params": [p for n, p in model_without_ddp.named_parameters() if p.requires_grad],
"lr": args.lr
}
]
else:
| param_dicts = get_param_dict_default(args, model_without_ddp) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: alm0ra/mockafka-py
# Path: mockafka/message.py
class Message:
def __init__(self, *args, **kwargs):
self._headers: Optional[dict] = kwargs.get('headers', None)
self._key: Optional[str] = kwargs.get('key', None)
self._value: Optional[str] = kwargs.get('value', None)
self._topic: Optional[str] = kwargs.get('topic', None)
self._offset: Optional[int] = kwargs.get('offset', None)
self._error: Optional[KafkaError] = kwargs.get('error', None)
self._latency: Optional[float] = kwargs.get('latency', None)
self._leader_epoch: Optional[int] = kwargs.get('leader_epoch', None)
self._partition: Optional[int] = kwargs.get('partition', None)
self._timestamp: int = kwargs.get('timestamp', None)
def offset(self, *args, **kwargs):
return self._offset
def latency(self, *args, **kwargs):
return self._latency
def leader_epoch(self, *args, **kwargs):
return self._leader_epoch
def headers(self, *args, **kwargs):
return self._headers
def key(self, *args, **kwargs):
return self._key
def value(self, *args, **kwargs):
return self._value
def timestamp(self, *args, **kwargs):
return self._timestamp
def topic(self, *args, **kwargs):
return self._topic
def error(self):
return self._error
def set_headers(self, *args, **kwargs): # real signature unknown
pass
def set_key(self, *args, **kwargs): # real signature unknown
pass
def set_value(self, *args, **kwargs): # real signature unknown
pass
# Path: mockafka/admin_client.py
class FakeAdminClientImpl:
def __init__(self, clean: bool = False, *args, **kwargs):
def create_partitions(self, partitions: list[NewPartitions]):
def create_partition(self, partition: NewPartitions):
def create_topics(self, topics: list[NewTopic]):
def create_topic(self, topic: NewTopic):
def delete_topics(self, topics, future=None, request_timeout=None, operation_timeout=None):
def delete_topic(self, topic: NewTopic):
def describe_acls(self, acl_binding_filter, future, request_timeout=None):
def describe_configs(self, resources, future, request_timeout=None, broker=None):
def delete_acls(self, acl_binding_filters, future, request_timeout=None):
def alter_configs(self, *args, **kwargs):
def create_acls(self, *args, **kwargs):
def list_groups(self, group=None, *args, **kwargs):
def list_topics(self, topic=None, *args, **kwargs):
def poll(self, timeout=None):
def __len__(self, *args, **kwargs):
# Path: mockafka/kafka_store.py
class SingletonMeta(type):
class KafkaStore(metaclass=SingletonMeta):
def __call__(cls, *args, **kwargs):
def __init__(self, clean: bool = False):
def is_topic_exist(topic: str) -> bool:
def is_partition_exist_on_topic(cls, topic: str, partition_num: int) -> bool:
def get_number_of_partition(topic: str) -> int:
def create_topic(topic: str):
def create_partition(self, topic: str, partitions: int):
def remove_topic(self, topic: str):
def set_first_offset(self, topic: str, partition: int, value: int):
def _add_next_offset(self, topic: str, partition: int):
def get_offset_store_key(self, topic: str, partition: int):
def produce(self, message: Message, topic: str, partition: int):
def get_message(self, topic: str, partition: int, offset: int) -> Message:
def get_partition_first_offset(self, topic: str, partition: int) -> int:
def get_partition_next_offset(self, topic: str, partition: int) -> int:
def topic_list() -> list[str]:
def partition_list(topic: str) -> list[int]:
def get_messages_in_partition(topic: str, partition: int) -> list[Message]:
def number_of_message_in_topic(self, topic: str) -> int:
def clear_topic_messages(self, topic: str):
def clear_partition_messages(topic: str, partition: int):
def reset_offset(self, topic: str, strategy: str = 'latest'):
def fresh():
FIRST_OFFSET = 'first_offset'
NEXT_OFFSET = 'next_offset'
# Path: mockafka/producer.py
class FakeProducer(object):
def __init__(self, config: dict = None):
self.kafka = KafkaStore()
def produce(self, topic, value=None, *args, **kwargs):
# create a message and call produce kafka
message = Message(value=value, topic=topic, *args, **kwargs)
self.kafka.produce(message=message, topic=topic, partition=kwargs['partition'])
def list_topics(self, topic=None, *args, **kwargs):
return ClusterMetadata(topic)
def abort_transaction(self, timeout=None):
# This method Does not support in mockafka
pass
def begin_transaction(self):
# This method Does not support in mockafka
pass
def commit_transaction(self, timeout=None):
# This method Does not support in mockafka
pass
def flush(self, timeout=None):
# This method Does not support in mockafka
return 0
def init_transactions(self, timeout=None):
# This method Does not support in mockafka
pass
def poll(self, timeout=None):
# This method Does not support in mockafka
return 0
def purge(self, in_queue=True, *args, **kwargs):
# This method Does not support in mockafka
pass
def send_offsets_to_transaction(self, positions, group_metadata,
timeout=None):
# This method Does not support in mockafka
pass
# Path: tests/test_producer.py
from unittest import TestCase
from mockafka import Message
from mockafka.admin_client import FakeAdminClientImpl, NewTopic
from mockafka.kafka_store import KafkaStore, KafkaException
from mockafka.producer import FakeProducer
from confluent_kafka import Message
import pytest
class TestFakeProducer(TestCase):
def setUp(self) -> None:
self.kafka = KafkaStore(clean=True)
self.producer = FakeProducer()
| self.admin_client = FakeAdminClientImpl() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: HRI-EU/rosenv
# Path: tests/conftest.py
ROS_2: Final[Literal[2]] = 2
# Path: tests/conftest.py
_T = TypeVar("_T")
ROS_1: Final[Literal[1]] = 1
ROS_2: Final[Literal[2]] = 2
ROS_1_PROJECT_LIST = ["adder", "adder_meta", "adder_srvs", "client", "python_server", "server"]
ROS_2_PROJECT_LIST = ["adder", "adder_srvs", "client", "python_server", "server"]
def get_ros_version() -> Literal[1, 2]:
def ros_distro() -> RosDistribution:
def ros_distro_config() -> DistroConfig:
def resources() -> Path:
def example_project_launch_files(resources: Path) -> Path:
def example_project_ros1(resources: Path) -> Path:
def example_project_ros2(resources: Path) -> Path:
def example_project(example_project_ros1: Path, example_project_ros2: Path) -> Path:
def project_list() -> list[str]:
def test_debs(resources: Path) -> Path:
def catkin_tools(resources: Path) -> Path:
def rosdistro_index(resources: Path) -> Path:
# Path: tests/conftest.py
def get_ros_version() -> Literal[1, 2]:
installed_distro = get_installed_distro_paths()
if len(installed_distro) == 0 or installed_distro[0].name != "noetic":
return ROS_2
return ROS_1
# Path: tests/integration/commands/install/test_install_launch_file_detection.py
import logging
import shutil
import pytest
from pathlib import Path
from cleo.application import Application
from cleo.testers.command_tester import CommandTester
from deb_pkg_tools.package import ArchiveEntry
from deb_pkg_tools.package import inspect_package_contents
from tests.conftest import ROS_2
from tests.conftest import YieldFixture
from tests.conftest import get_ros_version
#
# Copyright (c) Honda Research Institute Europe GmbH
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
from __future__ import annotations
@pytest.fixture()
def _copy_success_launch_file_project(
rosenv_target_path: Path,
example_project_launch_files: Path,
) -> YieldFixture[None]:
target_folder = rosenv_target_path.parent / "src"
target_folder.mkdir(exist_ok=True, parents=True)
success_project = example_project_launch_files / "src/launch_success"
assert success_project.exists(), "Success launch file project doesn't exist!"
shutil.copytree(success_project, target_folder / "launch_success")
yield
shutil.rmtree(target_folder, ignore_errors=True)
@pytest.fixture()
def _copy_failing_launch_file_project(
rosenv_target_path: Path,
example_project_launch_files: Path,
) -> YieldFixture[None]:
target_folder = rosenv_target_path.parent / "src"
target_folder.mkdir(exist_ok=True, parents=True)
failing_project = example_project_launch_files / "src/launch_fails"
assert failing_project.exists(), "Failing launch file project doesn't exist!"
shutil.copytree(failing_project, target_folder / "launch_fails")
yield
shutil.rmtree(target_folder, ignore_errors=True)
| @pytest.mark.skipif(get_ros_version() == ROS_2, reason="Launchfile-Checks only work in ROS1 currently") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CuriseJia/FreeStyleRet
# Path: src/utils/utils.py
def getR1Accuary(prob):
temp = prob.detach().cpu().numpy()
temp = np.argsort(temp, axis=1)
count = 0
for i in range(prob.shape[0]):
if temp[i][prob.shape[1]-1] == i:
count+=1
acc = count/prob.shape[0]
return acc
# Path: src/utils/utils.py
def getR5Accuary(prob):
temp = prob.detach().cpu().numpy()
temp = np.argsort(temp, axis=1)
count = 0
for i in range(prob.shape[0]):
for j in range(prob.shape[1]-4,prob.shape[1]):
if temp[i][j] == i:
count+=1
acc = count/prob.shape[0]
return acc
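# A small numeric sketch of the retrieval metric above (matrix values are
# illustrative assumptions, using plain numpy instead of a detached tensor):
# R@1 counts queries whose top-ranked gallery index equals their own index.
import numpy as np
prob_demo = np.array([[0.7, 0.2, 0.1],
                      [0.1, 0.3, 0.6],
                      [0.2, 0.1, 0.7]])
order_demo = np.argsort(prob_demo, axis=1)
r1_demo = float(np.mean(order_demo[:, -1] == np.arange(3)))
assert abs(r1_demo - 2 / 3) < 1e-9  # rows 0 and 2 rank themselves first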
# Path: src/utils/utils.py
def setup_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
cudnn.benchmark = True
# Path: src/dataset/data.py
class I2ITestDataset(Dataset):
def __init__(self, style, root_path, json_path, image_transform):
self.style = style
self.root_path = root_path
self.dataset = json.load(open(json_path,'r'))
self.image_transform = image_transform
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])
pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])
ori_image = self.image_transform(Image.open(ori_path))
pair_image = self.image_transform(Image.open(pair_path))
return [ori_image, pair_image, index]
# Path: src/dataset/data.py
class T2ITestDataset(Dataset):
def __init__(self, root_path, json_path, image_transform):
self.root_path = root_path
self.dataset = json.load(open(json_path,'r'))
self.image_transform = image_transform
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])
image_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])
f = open(caption_path, 'r')
caption = f.readline().replace('\n', '')
pair_image = self.image_transform(Image.open(image_path))
return [caption, pair_image, index]
# Path: comparison_test/imagebind_test.py
import torch
import argparse
import sys
import json
import os
import time
from tqdm import tqdm
from open_clip.factory import image_transform
from torch.utils.data import DataLoader
from src.utils import setup_seed, getR1Accuary, getR5Accuary
from src.dataset import I2ITestDataset, T2ITestDataset
from ImageBind.imagebind import data, ModalityType, imagebind_model
from prompt_model import Prompt_ImageBind
image_mean = (0.48145466, 0.4578275, 0.40821073)
image_std = (0.26861954, 0.26130258, 0.27577711)
def parse_args():
parser = argparse.ArgumentParser(description='Parse args for Prompt_ImageBind or Origin_ImageBind test on DSR dataset.')
# project settings
parser.add_argument('--origin_resume', default='', type=str, help='load origin model checkpoint from given path')
parser.add_argument('--prompt_resume', default='', type=str, help='load prompt model checkpoint from given path')
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--num_workers', default=6, type=int)
# data settings
parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
parser.add_argument("--test_dataset_path", type=str, default='DSR/')
parser.add_argument("--test_json_path", type=str, default='DSR/test.json')
parser.add_argument("--batch_size", type=int, default=24)
# model settings
parser.add_argument('--model', type=str, default='prompt', help='prompt-imagebind or imagebind-huge.')
parser.add_argument('--n_prompts', type=int, default=3)
parser.add_argument('--prompt_dim', type=int, default=50176)
args = parser.parse_args()
return args
def S2IRetrieval(args, model, ori_feat, pair_feat):
t1 = time.time()
if args.model == 'prompt':
ori_feat = model(ori_feat, dtype='image')
ske_feat = model(pair_feat, mode='image')
prob = torch.softmax(ske_feat @ ori_feat.T, dim=-1)
else:
with torch.no_grad():
ori_feat = model(ori_feat)
ske_feat = model(pair_feat)
prob = torch.softmax(ske_feat[ModalityType.VISION] @ ori_feat[ModalityType.VISION].T, dim=-1)
t2 = time.time()
print('inference a batch costs {}ms'.format((t2-t1)*1000))
return prob
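# Note: `prob` is a [batch, batch] matrix of softmax-normalised similarities between
# gallery and query features; the getR1Accuary/getR5Accuary helpers above treat
# index i (the diagonal) as the correct match for query i.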
def T2IRetrieval(args, model, ori_feat, pair_feat):
t1 = time.time()
if args.model == 'prompt':
ori_feat = model(ori_feat, dtype='image')
ske_feat = model(pair_feat, mode='text')
else:
with torch.no_grad():
ori_feat = model(ori_feat)
ske_feat = model(pair_feat)
prob = torch.softmax(ske_feat[ModalityType.TEXT] @ ori_feat[ModalityType.VISION].T, dim=-1)
t2 = time.time()
print('inference a batch costs {}ms'.format((t2-t1)*1000))
return prob
if __name__ == "__main__":
args = parse_args()
setup_seed(args.seed)
pair = json.load(open(args.test_json_path, 'r'))
if args.model == 'prompt':
model = Prompt_ImageBind(args)
model.load_state_dict(torch.load(args.prompt_resume))
else:
model = imagebind_model.imagebind_huge(args.origin_resume)
model.eval()
model.to(args.device)
r1 = []
r5 = []
rang = int(len(pair)/args.batch_size)
pre_process_val = image_transform(224, True, image_mean, image_std)
if args.type == 'text2image':
test_dataset = T2ITestDataset(args.test_dataset_path, args.test_json_path, pre_process_val)
else:
| test_dataset = I2ITestDataset(args.test_dataset_path, args.test_json_path, pre_process_val) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: liuqidong07/MOELoRA-peft
# Path: src/MLoRA/peft/utils/config.py
class PeftType(str, enum.Enum):
PROMPT_TUNING = "PROMPT_TUNING"
P_TUNING = "P_TUNING"
PREFIX_TUNING = "PREFIX_TUNING"
LORA = "LORA"
ADALORA = "ADALORA"
ADAPTION_PROMPT = "ADAPTION_PROMPT"
MMOELORAS = "MMOELORAS"
# Path: src/MLoRA/peft/utils/config.py
class PromptLearningConfig(PeftConfig):
"""
This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or
[`PromptTuning`].
Args:
num_virtual_tokens (`int`): The number of virtual tokens to use.
token_dim (`int`): The hidden embedding dimension of the base transformer model.
num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.
num_attention_heads (`int`): The number of attention heads in the base transformer model.
num_layers (`int`): The number of layers in the base transformer model.
"""
num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"})
token_dim: int = field(
default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"}
)
num_transformer_submodules: Optional[int] = field(
default=None, metadata={"help": "Number of transformer submodules"}
)
num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"})
num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"})
# Path: src/MLoRA/peft/tuners/prompt_tuning.py
import enum
import math
import torch
from dataclasses import dataclass, field
from typing import Optional, Union
from ..utils import PeftType, PromptLearningConfig
from transformers import AutoTokenizer
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PromptTuningInit(str, enum.Enum):
TEXT = "TEXT"
RANDOM = "RANDOM"
@dataclass
class PromptTuningConfig(PromptLearningConfig):
"""
This is the configuration class to store the configuration of a [`PromptEmbedding`].
Args:
prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding.
prompt_tuning_init_text (`str`, *optional*):
The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`.
tokenizer_name_or_path (`str`, *optional*):
The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`.
"""
prompt_tuning_init: Union[PromptTuningInit, str] = field(
default=PromptTuningInit.RANDOM,
metadata={"help": "How to initialize the prompt tuning parameters"},
)
prompt_tuning_init_text: Optional[str] = field(
default=None,
metadata={
"help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
},
)
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
},
)
def __post_init__(self):
| self.peft_type = PeftType.PROMPT_TUNING |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: voyage-ai/voyageai-python
# Path: voyageai/error.py
class VoyageError(Exception):
class APIError(VoyageError):
class TryAgain(VoyageError):
class Timeout(VoyageError):
class APIConnectionError(VoyageError):
class InvalidRequestError(VoyageError):
class MalformedRequestError(VoyageError):
class AuthenticationError(VoyageError):
class RateLimitError(VoyageError):
class ServerError(VoyageError):
class ServiceUnavailableError(VoyageError):
def __init__(
self,
message=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
):
def __str__(self):
def user_message(self):
def __repr__(self):
def construct_error_object(self):
def __init__(
self,
message,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
should_retry=False,
):
# Path: voyageai/util.py
VOYAGE_LOG = os.environ.get("VOYAGE_LOG")
VOYAGE = 1
class ApiType(Enum):
def from_str(label):
def _console_log_level():
def log_debug(message, **params):
def log_info(message, **params):
def log_warn(message, **params):
def logfmt(props):
def fmt(key, val):
def convert_to_voyage_object(resp):
def convert_to_dict(obj):
def merge_dicts(x, y):
def default_api_key() -> str:
# Path: voyageai/version.py
VERSION = "0.1.6"
# Path: voyageai/api_resources/voyage_response.py
class VoyageResponse:
def __init__(self, data, headers):
self._headers = headers
self.data = data
@property
def request_id(self) -> Optional[str]:
return self._headers.get("request-id")
@property
def retry_after(self) -> Optional[int]:
try:
return int(self._headers.get("retry-after"))
except TypeError:
return None
@property
def operation_location(self) -> Optional[str]:
return self._headers.get("operation-location")
@property
def organization(self) -> Optional[str]:
return self._headers.get("Voyage-Organization")
@property
def response_ms(self) -> Optional[int]:
h = self._headers.get("Voyage-Processing-Ms")
return None if h is None else round(float(h))
# Path: voyageai/util.py
class ApiType(Enum):
VOYAGE = 1
@staticmethod
def from_str(label):
if label.lower() == "voyage":
return ApiType.VOYAGE
else:
raise voyageai.error.InvalidAPIType(
"The API type provided in invalid. Please select one of the supported API types: 'voyage'"
)
# Path: voyageai/api_resources/api_requestor.py
import asyncio
import json
import time
import platform
import sys
import threading
import time
import warnings
import aiohttp
import requests
import voyageai
from json import JSONDecodeError
from typing import (
AsyncContextManager,
AsyncGenerator,
Callable,
Dict,
Iterator,
Optional,
Tuple,
Union,
overload,
)
from urllib.parse import urlencode, urlsplit, urlunsplit
from typing import Literal
from typing_extensions import Literal
from voyageai import error, util, version
from voyageai.api_resources.voyage_response import VoyageResponse
from voyageai.util import ApiType
if sys.version_info >= (3, 8):
else:
TIMEOUT_SECS = 600
MAX_SESSION_LIFETIME_SECS = 180
MAX_CONNECTION_RETRIES = 2
# Has one attribute per thread, 'session'.
_thread_context = threading.local()
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlunsplit((scheme, netloc, path, query, fragment))
def _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]:
"""Returns a value suitable for the 'proxies' argument to 'requests.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return {"http": proxy, "https": proxy}
elif isinstance(proxy, dict):
return proxy.copy()
else:
raise ValueError(
"'voyageai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _aiohttp_proxies_arg(proxy) -> Optional[str]:
"""Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return proxy
elif isinstance(proxy, dict):
return proxy["https"] if "https" in proxy else proxy["http"]
else:
raise ValueError(
"'voyageai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _make_session() -> requests.Session:
if voyageai.requestssession:
if isinstance(voyageai.requestssession, requests.Session):
return voyageai.requestssession
return voyageai.requestssession()
if not voyageai.verify_ssl_certs:
warnings.warn("verify_ssl_certs is ignored; voyageai always verifies.")
s = requests.Session()
proxies = _requests_proxies_arg(voyageai.proxy)
if proxies:
s.proxies = proxies
s.mount(
"https://",
requests.adapters.HTTPAdapter(max_retries=MAX_CONNECTION_RETRIES),
)
return s
def parse_stream_helper(line: bytes) -> Optional[str]:
if line and line.startswith(b"data:"):
if line.startswith(b"data: "):
# SSE event may be valid when it contains whitespace
line = line[len(b"data: "):]
else:
line = line[len(b"data:"):]
if line.strip() == b"[DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
else:
return line.decode("utf-8")
return None
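# Illustrative behaviour of the helper above (example bytes invented): a line such
# as b'data: {"text": "hi"}' comes back as the decoded JSON payload string,
# b'data: [DONE]' terminates the stream by returning None, and any line that does
# not start with b"data:" is skipped (also returns None).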
def parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
async def parse_stream_async(rbody: aiohttp.StreamReader):
async for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
class APIRequestor:
def __init__(
self,
key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
self.api_base = api_base or voyageai.api_base
| self.api_key = key or util.default_api_key() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: YuroFR/freqtrade-modded-crypto-trading-bot
# Path: freqtrade/enums/marginmode.py
class MarginMode(str, Enum):
"""
Enum to distinguish between
cross margin/futures margin_mode and
isolated margin/futures margin_mode
"""
CROSS = "cross"
ISOLATED = "isolated"
NONE = ''
# Path: freqtrade/enums/tradingmode.py
class TradingMode(str, Enum):
"""
Enum to distinguish between
spot, margin, futures or any other trading method
"""
SPOT = "spot"
MARGIN = "margin"
FUTURES = "futures"
# Path: tests/conftest.py
EXMS = 'freqtrade.exchange.exchange.Exchange'
# Path: tests/conftest.py
def get_mock_coro(return_value=None, side_effect=None):
async def mock_coro(*args, **kwargs):
if side_effect:
if isinstance(side_effect, list):
effect = side_effect.pop(0)
else:
effect = side_effect
if isinstance(effect, Exception):
raise effect
if callable(effect):
return effect(*args, **kwargs)
return effect
else:
return return_value
return Mock(wraps=mock_coro)
# Path: tests/conftest.py
def get_patched_exchange(mocker, config, api_mock=None, id='binance',
mock_markets=True, mock_supported_modes=True) -> Exchange:
patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes)
config['exchange']['name'] = id
try:
exchange = ExchangeResolver.load_exchange(config, load_leverage_tiers=True)
except ImportError:
exchange = Exchange(config)
return exchange
# Path: tests/exchange/test_exchange.py
def ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name,
fun, mock_ccxt_fun, retries=API_RETRY_COUNT + 1, **kwargs):
with patch('freqtrade.exchange.common.time.sleep'):
with pytest.raises(DDosProtection):
api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.DDoSProtection("DDos"))
exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
getattr(exchange, fun)(**kwargs)
assert api_mock.__dict__[mock_ccxt_fun].call_count == retries
with pytest.raises(TemporaryError):
api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.NetworkError("DeaDBeef"))
exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
getattr(exchange, fun)(**kwargs)
assert api_mock.__dict__[mock_ccxt_fun].call_count == retries
with pytest.raises(OperationalException):
api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.BaseError("DeadBeef"))
exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
getattr(exchange, fun)(**kwargs)
assert api_mock.__dict__[mock_ccxt_fun].call_count == 1
# Path: tests/exchange/test_bybit.py
from datetime import datetime, timedelta, timezone
from unittest.mock import MagicMock
from freqtrade.enums.marginmode import MarginMode
from freqtrade.enums.tradingmode import TradingMode
from tests.conftest import EXMS, get_mock_coro, get_patched_exchange
from tests.exchange.test_exchange import ccxt_exceptionhandlers
def test_additional_exchange_init_bybit(default_conf, mocker):
default_conf['dry_run'] = False
default_conf['trading_mode'] = TradingMode.FUTURES
| default_conf['margin_mode'] = MarginMode.ISOLATED |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yanzhh/HGERE
# Path: transformers/src/transformers/file_utils.py
def is_tf_available():
return _tf_available
# Path: transformers/src/transformers/file_utils.py
def is_torch_available():
return _torch_available
# Path: transformers/src/transformers/tokenization_bert.py
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
# Path: transformers/src/transformers/data/processors/utils.py
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_example_from_tensor_dict(self, tensor_dict):
"""Gets an example from a dict with tensorflow tensors
Args:
tensor_dict: Keys and values should match the corresponding Glue
tensorflow_dataset examples.
"""
raise NotImplementedError()
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def tfds_map(self, example):
"""Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are.
This method converts examples to the correct format."""
if len(self.get_labels()) > 1:
example.label = self.get_labels()[int(example.label)]
return example
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
# Path: transformers/src/transformers/data/processors/squad.py
import json
import logging
import os
import numpy as np
import torch
import tensorflow as tf
from functools import partial
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
from ...file_utils import is_tf_available, is_torch_available
from ...tokenization_bert import whitespace_tokenize
from .utils import DataProcessor
from torch.utils.data import TensorDataset
if is_torch_available():
if is_tf_available():
logger = logging.getLogger(__name__)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _new_check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# if len(doc_spans) == 1:
# return True
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span["start"] + doc_span["length"] - 1
if position < doc_span["start"]:
continue
if position > end:
continue
num_left_context = position - doc_span["start"]
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training):
features = []
# print (is_training)
if is_training and not example.is_impossible:
# Get start and end position
start_position = example.start_position
end_position = example.end_position
# print (start_position, end_position, example.answer_text)
# If the answer cannot be found in the text, then skip this example.
actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
| cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: generative-skill-chaining/gsc-code
# Path: generative_skill_chaining/networks/mlp.py
class LFF(torch.nn.Module):
"""
get torch.std_mean(self.B)
"""
def __init__(self, in_features, out_features, scale=1.0, init="iso", sincos=False):
super().__init__()
self.in_features = in_features
self.sincos = sincos
self.out_features = out_features
self.scale = scale
if self.sincos:
self.linear = torch.nn.Linear(in_features, self.out_features // 2)
else:
self.linear = torch.nn.Linear(in_features, self.out_features)
if init == "iso":
torch.nn.init.normal_(self.linear.weight, 0, scale / self.in_features)
torch.nn.init.normal_(self.linear.bias, 0, 1)
else:
torch.nn.init.uniform_(
self.linear.weight, -scale / self.in_features, scale / self.in_features
)
torch.nn.init.uniform_(self.linear.bias, -1, 1)
if self.sincos:
torch.nn.init.zeros_(self.linear.bias)
def forward(self, x, **_):
x = np.pi * self.linear(x)
if self.sincos:
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
else:
return torch.sin(x)
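# This layer appears to implement learned Fourier features: a linear projection
# scaled by pi, passed through sin (or concatenated sin/cos halves when
# sincos=True), with isotropic-normal or uniform weight initialisation.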
# Path: generative_skill_chaining/networks/mlp.py
class MLP(torch.nn.Module):
def __init__(
self,
input_dim: int,
output_dim: int,
hidden_layers: Sequence[int] = [256, 256],
act: Type[torch.nn.Module] = torch.nn.ReLU,
output_act: Optional[Type[torch.nn.Module]] = None,
):
super().__init__()
net: List[torch.nn.Module] = []
last_dim = input_dim
for dim in hidden_layers:
net.append(torch.nn.Linear(last_dim, dim))
net.append(act())
last_dim = dim
net.append(torch.nn.Linear(last_dim, output_dim))
if output_act is not None:
net.append(output_act())
self.net = torch.nn.Sequential(*net)
def forward(self, x):
return self.net(x)
# Path: generative_skill_chaining/networks/mlp.py
def weight_init(m):
if isinstance(m, torch.nn.Linear):
torch.nn.init.orthogonal_(m.weight.data)
if hasattr(m.bias, "data"):
m.bias.data.fill_(0.0)
# Path: generative_skill_chaining/networks/actors/base.py
class Actor(torch.nn.Module, abc.ABC):
def forward(self, state: torch.Tensor) -> torch.distributions.Distribution:
def predict(self, state: torch.Tensor, sample: bool = False) -> torch.Tensor:
# Path: generative_skill_chaining/networks/utils.py
class SquashedNormal(torch.distributions.TransformedDistribution):
def __init__(self, loc, scale):
self._loc = loc
self.scale = scale
self.base_dist = torch.distributions.Normal(loc, scale)
transforms = [torch.distributions.transforms.TanhTransform(cache_size=1)]
super().__init__(self.base_dist, transforms)
@property
def loc(self):
loc = self._loc
for transform in self.transforms:
loc = transform(loc)
return loc
# Path: generative_skill_chaining/networks/actors/mlp.py
from typing import Optional, Sequence, Type
from generative_skill_chaining.networks.mlp import LFF, MLP, weight_init
from generative_skill_chaining.networks.actors import base
from generative_skill_chaining.networks.utils import SquashedNormal
import gym
import torch
class ContinuousMLPActor(base.Actor):
def __init__(
self,
state_space: gym.spaces.Box,
action_space: gym.spaces.Box,
hidden_layers: Sequence[int] = [256, 256],
act: Type[torch.nn.Module] = torch.nn.ReLU,
output_act: Type[torch.nn.Module] = torch.nn.Tanh,
ortho_init: bool = False,
):
super().__init__()
self.mlp = MLP(
state_space.shape[0],
action_space.shape[0],
hidden_layers=hidden_layers,
act=act,
output_act=output_act,
)
if ortho_init:
| self.apply(weight_init) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ChiyuSONG/dynamics-of-instruction-tuning
# Path: train_sft.py
IGNORE_INDEX = -100
# Path: train_sft.py
class DataCollatorForSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
pad_to_multiple_of: Optional[int] = None
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels, attention_mask = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels", "attention_mask"))
max_label_length = max(len(l) for l in labels)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
input_ids = self.pad_sequence(input_ids, self.tokenizer.pad_token_id, max_label_length)
labels = self.pad_sequence(labels, IGNORE_INDEX, max_label_length)
attention_mask = self.pad_sequence(attention_mask, 0, max_label_length)
return dict(
input_ids=input_ids,
labels=labels,
attention_mask=attention_mask,
)
def pad_sequence(self, feature, padding_value, max_label_length):
for idx, instance in enumerate(feature):
remainder = torch.LongTensor( [padding_value] * (max_label_length - len(instance)) )
feature[idx] = torch.cat((instance, remainder), 0) if self.tokenizer.padding_side == "right" \
else torch.cat((remainder, instance), 0)
return torch.stack(feature, dim = 0)
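# Illustrative padding example (numbers invented): with pad_to_multiple_of=8 and a
# longest label of 13 tokens, max_label_length is rounded up to 16 and every
# sequence is padded to that length on the side given by tokenizer.padding_side
# (IGNORE_INDEX for labels, pad_token_id for input_ids, 0 for attention_mask).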
# Path: train_sft.py
ATTR_TO_SPECIAL_TOKEN = {"additional_special_tokens": ["<user>", "<assistant>", "<eot>"]}
# Path: inference.py
import torch
from transformers import (
LlamaForCausalLM,
LlamaTokenizer,
set_seed,
GenerationConfig
)
from train_sft import IGNORE_INDEX, DataCollatorForSupervisedDataset, ATTR_TO_SPECIAL_TOKEN
def process(batch, tokenizer):
processed = []
user = tokenizer.user_token_id
assistant = tokenizer.assistant_token_id
eot = tokenizer.eot_token_id
def tokenize(s):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s.strip()))
for example in batch:
input_ids = []
labels = []
messages = []
messages.extend(example["messages"])
for message in messages:
input_ids.append(user if message["role"] == "user" else assistant)
labels.append(IGNORE_INDEX)
content = tokenize(message["content"]) + [eot]
input_ids.extend(content)
labels.extend([IGNORE_INDEX]*len(content) if message["role"] == "user" else content)
input_ids.append(assistant)
labels.append(IGNORE_INDEX)
assert len(input_ids) == len(labels)
attention_mask = [1] * len(input_ids)
processed.append( {'input_ids':torch.LongTensor(input_ids), 'labels': torch.LongTensor(labels), 'attention_mask': torch.LongTensor(attention_mask)} )
return processed
class Assistant:
def __init__(self, model_name_or_path):
tokenizer = LlamaTokenizer.from_pretrained(model_name_or_path)
tokenizer.padding_side = "left"
tokenizer.user_token_id, tokenizer.assistant_token_id, tokenizer.eot_token_id \
= tokenizer.convert_tokens_to_ids(ATTR_TO_SPECIAL_TOKEN["additional_special_tokens"])
model = LlamaForCausalLM.from_pretrained(model_name_or_path, device_map="auto")
model.tie_weights()
model.eval()
self.tokenizer = tokenizer
self.model = model
self.seed = 0
# use greedy decoding as default
self.config = GenerationConfig(
max_new_tokens=1024,
min_length=1,
do_sample=False,
output_scores=True,
return_dict_in_generate=True,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=[tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.unk_token_id,
tokenizer.eot_token_id, tokenizer.user_token_id, tokenizer.assistant_token_id],
)
set_seed(self.seed)
def inference(self, batch):
processed = process(batch, tokenizer=self.tokenizer)
| data_collator = DataCollatorForSupervisedDataset(tokenizer=self.tokenizer, pad_to_multiple_of=8) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: akashgreninja/GreSec
# Path: backend/venv/lib/python3.10/site-packages/pydantic/annotated_handlers.py
class GetCoreSchemaHandler:
"""Handler to call into the next CoreSchema schema generation function."""
def __call__(self, __source_type: Any) -> core_schema.CoreSchema:
"""Call the inner handler and get the CoreSchema it returns.
This will call the next CoreSchema modifying function up until it calls
into Pydantic's internal schema generation machinery, which will raise a
`pydantic.errors.PydanticSchemaGenerationError` error if it cannot generate
a CoreSchema for the given source type.
Args:
__source_type: The input type.
Returns:
CoreSchema: The `pydantic-core` CoreSchema generated.
"""
raise NotImplementedError
def generate_schema(self, __source_type: Any) -> core_schema.CoreSchema:
"""Generate a schema unrelated to the current context.
Use this function if e.g. you are handling schema generation for a sequence
and want to generate a schema for its items.
Otherwise, you may end up doing something like applying a `min_length` constraint
that was intended for the sequence itself to its items!
Args:
__source_type: The input type.
Returns:
CoreSchema: The `pydantic-core` CoreSchema generated.
"""
raise NotImplementedError
def resolve_ref_schema(self, __maybe_ref_schema: core_schema.CoreSchema) -> core_schema.CoreSchema:
"""Get the real schema for a `definition-ref` schema.
If the schema given is not a `definition-ref` schema, it will be returned as is.
This means you don't have to check before calling this function.
Args:
__maybe_ref_schema: A `CoreSchema`, `ref`-based or not.
Raises:
LookupError: If the `ref` is not found.
Returns:
A concrete `CoreSchema`.
"""
raise NotImplementedError
@property
def field_name(self) -> str | None:
"""Get the name of the closest field to this validator."""
raise NotImplementedError
def _get_types_namespace(self) -> dict[str, Any] | None:
"""Internal method used during type resolution for serializer annotations."""
raise NotImplementedError
# Path: backend/venv/lib/python3.10/site-packages/pydantic/annotated_handlers.py
class GetJsonSchemaHandler:
"""Handler to call into the next JSON schema generation function.
Attributes:
mode: Json schema mode, can be `validation` or `serialization`.
"""
mode: JsonSchemaMode
def __call__(self, __core_schema: CoreSchemaOrField) -> JsonSchemaValue:
"""Call the inner handler and get the JsonSchemaValue it returns.
This will call the next JSON schema modifying function up until it calls
into `pydantic.json_schema.GenerateJsonSchema`, which will raise a
`pydantic.errors.PydanticInvalidForJsonSchema` error if it cannot generate
a JSON schema.
Args:
__core_schema: A `pydantic_core.core_schema.CoreSchema`.
Returns:
JsonSchemaValue: The JSON schema generated by the inner JSON schema modify
functions.
"""
raise NotImplementedError
def resolve_ref_schema(self, __maybe_ref_json_schema: JsonSchemaValue) -> JsonSchemaValue:
"""Get the real schema for a `{"$ref": ...}` schema.
If the schema given is not a `$ref` schema, it will be returned as is.
This means you don't have to check before calling this function.
Args:
__maybe_ref_json_schema: A JsonSchemaValue, ref based or not.
Raises:
LookupError: If the ref is not found.
Returns:
JsonSchemaValue: A JsonSchemaValue that has no `$ref`.
"""
raise NotImplementedError
# Path: backend/venv/lib/python3.10/site-packages/pydantic/_internal/_schema_generation_shared.py
from typing import TYPE_CHECKING, Any, Callable
from pydantic_core import core_schema
from typing_extensions import Literal
from ..annotated_handlers import GetCoreSchemaHandler, GetJsonSchemaHandler
from ..json_schema import GenerateJsonSchema, JsonSchemaValue
from ._core_utils import CoreSchemaOrField
from ._generate_schema import GenerateSchema
"""Types and utility functions used by various other internal tools."""
from __future__ import annotations
if TYPE_CHECKING:
GetJsonSchemaFunction = Callable[[CoreSchemaOrField, GetJsonSchemaHandler], JsonSchemaValue]
HandlerOverride = Callable[[CoreSchemaOrField], JsonSchemaValue]
class GenerateJsonSchemaHandler(GetJsonSchemaHandler):
"""JsonSchemaHandler implementation that doesn't do ref unwrapping by default.
This is used for any Annotated metadata so that we don't end up with conflicting
modifications to the definition schema.
Used internally by Pydantic, please do not rely on this implementation.
See `GetJsonSchemaHandler` for the handler API.
"""
def __init__(self, generate_json_schema: GenerateJsonSchema, handler_override: HandlerOverride | None) -> None:
self.generate_json_schema = generate_json_schema
self.handler = handler_override or generate_json_schema.generate_inner
self.mode = generate_json_schema.mode
def __call__(self, __core_schema: CoreSchemaOrField) -> JsonSchemaValue:
return self.handler(__core_schema)
def resolve_ref_schema(self, maybe_ref_json_schema: JsonSchemaValue) -> JsonSchemaValue:
"""Resolves `$ref` in the json schema.
This returns the input json schema if there is no `$ref` in json schema.
Args:
maybe_ref_json_schema: The input json schema that may contain `$ref`.
Returns:
Resolved json schema.
Raises:
LookupError: If it can't find the definition for `$ref`.
"""
if '$ref' not in maybe_ref_json_schema:
return maybe_ref_json_schema
ref = maybe_ref_json_schema['$ref']
json_schema = self.generate_json_schema.get_schema_from_definitions(ref)
if json_schema is None:
raise LookupError(
f'Could not find a ref for {ref}.'
' Maybe you tried to call resolve_ref_schema from within a recursive model?'
)
return json_schema
| class CallbackGetCoreSchemaHandler(GetCoreSchemaHandler): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mindsdb/otto
# Path: ottoai/templates.py
INSTRUCTION = """
You write python code, write the simplest and most effective Python function to answer the following.
Question: {question}
Follow these instructions to write the function:
- The function must be called 'runner'
- code should only have the necessary imports and the function runner
- The function shall return a response to the question
- Only import the fewest possible pip modules from this list: ({modules_metadata}),
- Import the minimum number of modules necessary
- The function takes only one argument called 'input' as as follows:
input={input_dictionary_string}
"""
# Path: ottoai/helpers.py
def create_string(arg_data):
def extract_python_code_from_md(md_string):
def get_runner_function(code_string):
# Path: ottoai/classes.py
from ottoai.templates import INSTRUCTION
from ottoai.helpers import llm_completion, create_string, extract_python_code_from_md, get_runner_function
import logging
import os
import json
import pkg_resources
import subprocess
import os
import openai
import logger
import json
class Assistant:
"""
The Assistant class is responsible for managing the skills and conversations.
"""
def __init__(self, name: str, personality: str, llm_engine, model: str, user_context_variables: dict = {}):
"""
Initialize the assistant with a name, personality, language model engine, and model.
"""
self.name = name
self.personality = personality
self.llm_engine = llm_engine
self.model = model
self.pip_skills = []
self.user_context_variables = user_context_variables
def _m(self, messages):
return llm_completion(model=self.model, messages=messages)
def set_user_context_variables(self, user_context_variables: dict = {}):
"""
Set the user context variables for the assistant.
Parameters:
user_context_variables (dict): A dictionary containing the user context variables.
"""
self.user_context_variables = user_context_variables
def add_pip_skill(self, pip_module):
"""
Add a new skill to the assistant.
"""
installed_packages = pkg_resources.working_set
installed_packages_list = sorted(["%s==%s" % (i.key, i.version) for i in installed_packages])
if pip_module not in installed_packages_list:
try:
installed_packages_pip_freeze = subprocess.check_output(["pip", "freeze"]).decode().split('\n')
if pip_module not in (package.split('==')[0] for package in installed_packages_pip_freeze) and pip_module not in (package.split('==')[0] for package in installed_packages_pip_freeze):
raise ImportError(f"Trying to add skill, but pip module {pip_module} is not installed. \nTo solve this try: pip install {pip_module}")
except subprocess.CalledProcessError:
raise ImportError(f"Failed to execute pip freeze.")
self.pip_skills.append(pip_module)
def question(self, text: str):
"""
Send a message to the assistant and return the assistant's response.
"""
response = self.generate_and_run_code_for_question(text)
return response
def start_conversation(self, user_name: str):
"""
Start a new conversation with the user.
"""
return Conversation(self, user_name)
def generate_and_run_code_for_question(self, question, retries_until_figured = 10):
arg_data = {
"context_variables": {key: "<...HIDDEN...>" for key in self.user_context_variables}
}
arg_data_all = {
"context_variables": self.user_context_variables
}
arguments_dictionary_str = create_string(arg_data)
modules_metadata = ", ".join(self.pip_skills)
instruction = INSTRUCTION.format(modules_metadata=modules_metadata, input_dictionary_string=arguments_dictionary_str, question=question)
logging.debug("[OTTO] Generated Instruction: " + instruction)
messages = [{"role": "system", "content": instruction}]
error = 1
code = ''
error_message = None
for _ in range(retries_until_figured):
if error_message:
messages += [{"role": "system", "content":"ran {code} and had this error: {error}".format(code=code, error=error_message)}]
logging.debug("[OTTO] Messages: \n" + json.dumps(messages, indent=4))
resp = self._m(messages)
code = resp['choices'][0]['message']['content']
error_message = None
try:
function_code = extract_python_code_from_md(code)
if function_code is not None:
code = function_code
logging.debug("[OTTO] Generated Code: \n```\n{code}\n\n```\n".format(code = code))
| runner_function = get_runner_function(code) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: adarshxs/TokenTally
# Path: utilities.py
def load_base_models():
with open("models.json", "r") as f:
return json.load(f)
# Path: utilities.py
def load_quantization():
with open("quantization.json", "r") as f:
return json.load(f)
# Path: utilities.py
def load_gpus():
with open("gpus.json", "r") as f:
return json.load(f)
# Path: utilities.py
def load_gpu_providers():
return pd.read_csv('cloud-gpus.csv')
# Path: utilities.py
def convert_params(params):
if params == 0:
return "0"
size_name = ("", "K", "M", "B", "T", "P", "E", "Z", "Y")
i = int(math.floor(math.log(params, 1000)))
p = math.pow(1000, i)
s = round(params / p, 2)
return "%s %s" % (s, size_name[i])
# Path: utilities.py
def compute_bound_tokens_p_sec(flops_per_token, flops_per_gpu, num_gpus):
return (flops_per_gpu * num_gpus * 10**12) / (flops_per_token * 10**9)
# Path: utilities.py
def memory_bound_tokens_p_sec(memory_bandwidth_per_gpu, flops_per_token, num_gpus):
return (memory_bandwidth_per_gpu * num_gpus * 10**12) / (flops_per_token * 10**9)
# Path: utilities.py
def cost_per_1k_tokens(flops_per_token, flops_per_gpu, num_gpus, cost_per_hour, memory_bandwidth_per_gpu):
tokens_p_sec_compute = compute_bound_tokens_p_sec(flops_per_token, flops_per_gpu, num_gpus)
tokens_p_sec_memory = memory_bound_tokens_p_sec(memory_bandwidth_per_gpu, flops_per_token, num_gpus)
cost_p_sec = cost_per_hour / 3600 # cost per second
cost_p_token_compute = cost_p_sec / tokens_p_sec_compute
cost_p_token_memory = cost_p_sec / tokens_p_sec_memory
cost_p_1k_tokens_compute = cost_p_token_compute * 1000
cost_p_1k_tokens_memory = cost_p_token_memory * 1000
return cost_p_1k_tokens_compute, cost_p_1k_tokens_memory
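# Quick sanity check of the formula above (all numbers invented for illustration):
# cost_per_hour = 3.60 gives cost_p_sec = 3.60 / 3600 = 0.001; at a compute-bound
# rate of 1,000 tokens/s the per-token cost is 1e-6, so cost_p_1k_tokens_compute
# works out to 0.001 (i.e. $0.001 per 1k tokens).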
# Path: tools/llm_cost_calculator.py
import streamlit as st
from utilities import load_base_models, load_quantization, load_gpus, load_gpu_providers, convert_params, compute_bound_tokens_p_sec, memory_bound_tokens_p_sec, cost_per_1k_tokens
def display_llm_cost_tool():
st.title("Token Tally: LLM Cost Estimator")
st.subheader("Estimate Your LLM's Token Toll Across Various Platforms and Configurations")
# Base model and configurations data
base_models = load_base_models()
| quantization_data = load_quantization() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: WestlakeIntelligentRobotics/ConsensusLLM-code
# Path: modules/llm/gpt.py
class GPT:
"""
Initialize the GPT class for interacting with OpenAI's GPT model.
GPT provides basic methods for interacting with the model and parsing its
output.
"""
def __init__(self, key: str, model: str = 'gpt-3.5-turbo-0613',
temperature: float = 0.7, keep_memory: bool = True):
"""
Initialize the GPT class.
Args:
key (str): OpenAI API key.
model (str): The model to use (default: gpt-3.5-turbo-0613).
temperature (float): Temperature for text generation (default: 0.7).
keep_memory (bool): Whether to retain memories (default: True).
"""
self._model = model
self._openai_key = key
self._cost = 0
self._memories = []
self._keep_memory = keep_memory
self._temperature = temperature
self._history = []
def get_memories(self):
"""
Get the current memories.
Returns:
list: List of memories.
"""
return self._memories
def get_history(self):
"""
Get the conversation history.
Returns:
list: List of conversation history.
"""
return self._history
def memories_update(self, role: str, content: str):
"""
Update memories to set roles (system, user, assistant) and content,
forming a complete memory.
Args:
role (str): Role (system, user, assistant).
content (str): Content.
Raises:
ValueError: If an unrecognized role is provided or if roles are
added in an incorrect sequence.
"""
if role not in ["system", "user", "assistant"]:
raise ValueError(f"Unrecognized role: {role}")
if role == "system" and len(self._memories) > 0:
raise ValueError('System role can only be added when memories are '
'empty')
if (role == "user" and len(self._memories) > 0 and
self._memories[-1]["role"] == "user"):
raise ValueError('User role can only be added if the previous '
'round was a system or assistant role')
if (role == "assistant" and len(self._memories) > 0 and
self._memories[-1]["role"] != "user"):
raise ValueError('Assistant role can only be added if the previous '
'round was a user role')
self._memories.append({"role": role, "content": content})
self._history.append({"role": role, "content": content})
def generate_answer(self, input: str, try_times=0, **kwargs) -> str:
"""
Interact with the GPT model and generate an answer.
Args:
input (str): Prompt or user input.
try_times (int): Number of attempts (default is 0).
kwargs: Additional parameters for the model.
Returns:
str: Text-based output result.
Raises:
ConnectionError: If there's an error in generating the answer.
"""
if not self._keep_memory:
self._memories = [self._memories[0]]
if try_times == 0:
self._memories.append({"role": "user", "content": input})
self._history.append({"role": "user", "content": input})
else:
if self._memories[-1]["role"] == "assistant":
self._memories = self._memories[:-1]
openai.api_key = self._openai_key
try:
response = openai.ChatCompletion.create(
model=self._model,
messages=self._memories,
temperature=self._temperature,
**kwargs
)
self._cost += response['usage']["total_tokens"]
content = response['choices'][0]['message']['content']
self._memories.append({"role": "assistant", "content": content})
self._history.append({"role": "assistant", "content": content})
return content
except Exception as e:
raise ConnectionError(f"Error in generate_answer: {e}")
# Path: modules/prompt/summarize.py
# Path: modules/prompt/form.py
# Path: modules/llm/agent_2d.py
import re
import numpy as np
from .gpt import GPT
from ..prompt.summarize import summarizer_role
from ..prompt.form import summarizer_output_form
"""
MIT License
Copyright (c) [2023] [Intelligent Unmanned Systems Laboratory at
Westlake University]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM,
OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE, OR OTHER DEALINGS IN
THE SOFTWARE.
"""
| class Agent2D(GPT): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: inngest/inngest-py
# Path: inngest/_internal/client_lib.py
_DEV_SERVER_EVENT_KEY = "NO_EVENT_KEY_SET"
class Inngest:
def api_origin(self) -> str:
def event_api_origin(self) -> str:
def event_key(self) -> str | None:
def signing_key(self) -> str | None:
def __init__(
self,
*,
api_base_url: str | None = None,
app_id: str,
event_api_base_url: str | None = None,
event_key: str | None = None,
is_production: bool | None = None,
logger: types.Logger | None = None,
middleware: list[
type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]
]
| None = None,
signing_key: str | None = None,
) -> None:
def _build_send_request(
self,
events: list[event_lib.Event],
) -> types.MaybeError[httpx.Request]:
def add_middleware(
self,
middleware: type[
middleware_lib.Middleware | middleware_lib.MiddlewareSync
],
) -> None:
def create_function(
self,
*,
batch_events: function_config.Batch | None = None,
cancel: list[function_config.Cancel] | None = None,
debounce: function_config.Debounce | None = None,
fn_id: str,
middleware: list[
type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]
]
| None = None,
name: str | None = None,
on_failure: function.FunctionHandlerAsync
| function.FunctionHandlerSync
| None = None,
rate_limit: function_config.RateLimit | None = None,
retries: int | None = None,
throttle: function_config.Throttle | None = None,
trigger: function_config.TriggerCron | function_config.TriggerEvent,
) -> typing.Callable[
def decorator(
func: function.FunctionHandlerAsync | function.FunctionHandlerSync,
) -> function.Function:
async def send(
self,
events: event_lib.Event | list[event_lib.Event],
) -> list[str]:
def send_sync(
self,
events: event_lib.Event | list[event_lib.Event],
) -> list[str]:
def set_logger(self, logger: types.Logger) -> None:
def _extract_ids(body: object) -> list[str]:
# Path: inngest/_internal/execution.py
class Call(types.BaseModel):
class CallContext(types.BaseModel):
class CallStack(types.BaseModel):
class CallError(types.BaseModel):
class FunctionCallResponse(types.BaseModel):
class StepResponse(types.BaseModel):
class Output(types.BaseModel):
class Opcode(enum.Enum):
def from_error(cls, err: Exception) -> CallError:
def is_step_call_responses(
value: object,
) -> typing.TypeGuard[list[StepResponse]]:
INVOKE = "InvokeFunction"
PLANNED = "StepPlanned"
SLEEP = "Sleep"
STEP = "Step"
WAIT_FOR_EVENT = "WaitForEvent"
UNSPECIFIED_STEP_ID = "step"
# Path: inngest/_internal/function.py
class Context:
class _Config:
class FunctionHandlerAsync(typing.Protocol):
class FunctionHandlerSync(typing.Protocol):
class FunctionOpts(types.BaseModel):
class Function:
class _UserError(Exception):
def __call__(
self,
ctx: Context,
step: step_lib.Step,
) -> typing.Awaitable[types.Serializable]:
def __call__(
self,
ctx: Context,
step: step_lib.StepSync,
) -> types.Serializable:
def _is_function_handler_async(
value: FunctionHandlerAsync | FunctionHandlerSync,
) -> typing.TypeGuard[FunctionHandlerAsync]:
def _is_function_handler_sync(
value: FunctionHandlerAsync | FunctionHandlerSync,
) -> typing.TypeGuard[FunctionHandlerSync]:
def convert_validation_error(
self,
err: pydantic.ValidationError,
) -> BaseException:
def id(self) -> str:
def is_handler_async(self) -> bool:
def is_on_failure_handler_async(self) -> bool | None:
def on_failure_fn_id(self) -> str | None:
def __init__(
self,
opts: FunctionOpts,
trigger: function_config.TriggerCron | function_config.TriggerEvent,
handler: FunctionHandlerAsync | FunctionHandlerSync,
middleware: list[
type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]
]
| None = None,
) -> None:
async def call( # noqa: C901
self,
call: execution.Call,
client: client_lib.Inngest,
ctx: Context,
fn_id: str,
middleware: middleware_lib.MiddlewareManager,
target_hashed_id: str | None,
) -> execution.CallResult:
def call_sync( # noqa: C901
self,
call: execution.Call,
client: client_lib.Inngest,
ctx: Context,
fn_id: str,
middleware: middleware_lib.MiddlewareManager,
target_hashed_id: str | None,
) -> execution.CallResult:
def get_config(self, app_url: str) -> _Config:
def get_id(self) -> str:
def __init__(self, err: Exception) -> None:
def _remove_first_traceback_frame(err: Exception) -> None:
# Path: inngest/_internal/middleware_lib/middleware.py
from inngest._internal import client_lib, execution, function
from __future__ import annotations
class Middleware:
def __init__(self, client: client_lib.Inngest) -> None:
self._client = client
async def after_execution(self) -> None:
"""
After executing new code. Called multiple times per run when using
steps.
"""
return None
async def before_execution(self) -> None:
"""
Before executing new code. Called multiple times per run when using
steps.
"""
return None
async def before_response(self) -> None:
"""
After the output has been set and before the response is sent
back to Inngest. This is where you can perform any final actions before
the response is sent back to Inngest. Called multiple times per run when
using steps. Not called for function middleware.
"""
return None
async def transform_input(
self,
| ctx: function.Context, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: f0uriest/quadax
# Path: quadax/quad_weights.py
def _cc_get_weights(N):
def _get_tmax(xmax):
D = 2 / N * np.cos(k[:, None] * n[None, :] * np.pi / (N // 2))
D = np.where((n == 0) | (n == N // 2), D * 1 / 2, D)
N = int(2 * 2**i)
N = int(2 * 2**i)
# Path: quadax/utils.py
def wrap_func(fun, args):
"""Vectorize, jit, and mask out inf/nan."""
f = jax.eval_shape(fun, jnp.array(0.0), *args)
# need to make sure we get the correct shape for array valued integrands
outsig = "(" + ",".join("n" + str(i) for i in range(len(f.shape))) + ")"
@jax.jit
@partial(jnp.vectorize, signature="()->" + outsig)
def wrapped(x):
f = fun(x, *args)
return jnp.where(jnp.isfinite(f), f, 0.0)
return wrapped
# Path: quadax/fixed_order.py
import functools
import jax
import jax.numpy as jnp
from .quad_weights import cc_weights, gk_weights, ts_weights
from .utils import wrap_func
"""Fixed order quadrature."""
def _dot(w, f):
return jnp.sum(w * f.T, axis=-1).T
@functools.partial(jax.jit, static_argnums=(0, 4, 5))
def fixed_quadgk(fun, a, b, args=(), norm=jnp.inf, n=21):
"""Integrate a function from a to b using a fixed order Gauss-Konrod rule.
Integration is performed using an order n Konrod rule with error estimated
using an embedded n//2 order Gauss rule.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
a, b : float
Lower and upper limits of integration. Must be finite.
args : tuple, optional
Extra arguments passed to fun.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
n : {15, 21, 31, 41, 51, 61}
Order of integration scheme.
Returns
-------
y : float, Array
Estimate of the integral of fun from a to b
err : float
Estimate of the absolute error in y from nested Gauss rule.
y_abs : float, Array
Estimate of the integral of abs(fun) from a to b
y_mmn : float, Array
Estimate of the integral of abs(fun - <fun>) from a to b, where <fun>
is the mean value of fun over the interval.
"""
_norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)
vfun = wrap_func(fun, args)
def truefun():
f = jax.eval_shape(vfun, jnp.array(0.0))
z = jnp.zeros(f.shape, f.dtype)
return z, 0.0, z, z
def falsefun():
try:
xk, wk, wg = (
| gk_weights[n]["xk"], |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: smonsays/metax
# Path: metax/data/base.py
DATAPATH = Path(os.path.expanduser("~/data/jax"))
# Path: metax/data/base.py
class Dataset(NamedTuple):
x: Array
y: Array
info: Dict = dict()
# Path: metax/data/base.py
class MetaDataset(NamedTuple):
train: Union[Dataset, MultitaskDataset]
test: Union[Dataset, MultitaskDataset]
# Path: examples/maml-omniglot.py
import argparse
import jax
import jax.numpy as jnp
import jax.tree_util as jtu
import optax
import metax
from jax_meta.datasets import Omniglot
from metax.data.base import DATAPATH, Dataset, MetaDataset
"""
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=None)
parser.add_argument("--bn_decay", type=float, default=0.9)
parser.add_argument("--channels", type=int, default=64)
parser.add_argument("--num_tasks_test", type=int, default=100)
parser.add_argument("--num_tasks_train", type=int, default=10000)
parser.add_argument("--num_tasks_valid", type=int, default=10)
parser.add_argument("--ways", type=int, default=5)
parser.add_argument("--shots_test", type=int, default=10)
parser.add_argument("--shots_train", type=int, default=10)
parser.add_argument("--first_order", type=bool, default=False)
parser.add_argument("--lr_inner", type=float, default=0.4)
parser.add_argument("--lr_outer", type=float, default=0.001)
parser.add_argument("--meta_batch_size", type=int, default=16)
parser.add_argument("--steps_inner", type=int, default=1)
parser.add_argument("--steps_outer", type=int, default=100)
parser.add_argument("--seed", type=int, default=2022)
args = parser.parse_args()
# Load data from [jax_meta](https://github.com/tristandeleu/jax-meta-learning)
metaloader = Omniglot(
DATAPATH,
batch_size=args.meta_batch_size,
shots=args.shots_train,
ways=args.ways,
)
metaloader.input_shape = metaloader.shape
metaloader.output_dim = metaloader.ways
metaloader.sample_input = jnp.array(metaloader.dummy_input)
# Define the loss, meta-model and meta-learning algorithm
base_model = metax.models.Conv4(args.channels, args.bn_decay, readout=args.ways)
meta_model = metax.module.LearnedInit(
loss_fn_inner=metax.energy.CrossEntropy(),
loss_fn_outer=metax.energy.CrossEntropy(),
base_learner=base_model,
reg_strength=None
)
meta_learner = metax.learner.ModelAgnosticMetaLearning(
meta_model=meta_model,
batch_size=args.batch_size,
steps_inner=args.steps_inner,
optim_fn_inner=optax.sgd(args.lr_inner),
optim_fn_outer=optax.adam(args.lr_outer),
first_order=args.first_order,
)
# Initialize
rng = jax.random.PRNGKey(args.seed)
rng_reset, rng_train, rng_test = jax.random.split(rng, 3)
meta_state = meta_learner.reset(rng_reset, metaloader.sample_input)
meta_update = jax.jit(meta_learner.update)
meta_eval = jax.jit(meta_learner.eval, static_argnames="steps")
# Train
for idx, batch in zip(range(args.steps_outer), metaloader):
# Mangle data into the format expected by metax
| batch = MetaDataset( |
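A hedged sketch of how this loop body might continue is given below; the field layout of the jax_meta batch ("train"/"test" entries with .inputs/.targets) and the meta_update call signature are assumptions, not taken from the repository.

# (continuation of the for-loop body above; field names and call signature assumed)
batch = MetaDataset(
    train=Dataset(x=batch["train"].inputs, y=batch["train"].targets),
    test=Dataset(x=batch["test"].inputs, y=batch["test"].targets),
)
rng_train, rng_step = jax.random.split(rng_train)
meta_state, metrics = meta_update(rng_step, meta_state, batch)  # signature assumed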
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: claws-lab/XLingEval
# Path: consistency/Medalpaca/model_medalpaca.py
def init_medalpaca_model(args):
# --- Flags from the original code ---
load_in_8bit = False
cache_dir = None
print(f"Loading model {args.model}...")
if args.model == "medalpaca-30b":
base_model = "decapoda-research/llama-30b-hf"
model_name = "medalpaca/medalpaca-lora-30b-8bit"
peft = True
elif args.model == "medalpaca-13b":
base_model = "decapoda-research/llama-13b-hf"
model_name = "medalpaca/medalpaca-lora-13b-8bit"
peft = True
elif args.model == "medalpaca-7b":
base_model = "../PPLM/models_hf/7B"
model_name = "medalpaca/medalpaca-7b"
model_name = "medalpaca/medalpaca-lora-7b-16bit"
peft = True
cache_dir = "../medAlpaca/medalpaca-7b"
else:
raise ValueError(f"Unknown model: {args.model}")
prompt_template = f"consistency/Medalpaca/prompt_templates/medalpaca_consistency.json"
# ------------------------------------
# Only initialize this model on a Linux machine, which has sufficient GPU memory.
print("peft", peft)
print("load_in_8bit", load_in_8bit)
if platform.system() == "Linux":
model = Inferer(
model_name=model_name,
prompt_template=prompt_template,
# f"../medalpaca/prompt_templates/medalpaca.json",
base_model=base_model,
peft=peft,
load_in_8bit=load_in_8bit,
args=args,
cache_dir=cache_dir,
)
else:
model = None
return model
# Path: arguments.py
REDDIT_COMMENTS_DIR = "E:\\data\\Reddit\\comments"
DATA_DIR = "F:\\data\\NLP"
DEVICE_MAP = {"": 0}
DATA_DIR = osp.join(const.HOME_DIR_LINUX_SERVER, "Workspace", "data", "NLP")
DEVICE_MAP = {"": [0, 1, 2, 3]}
DATA_DIR = osp.join(const.HOME_DIR_LINUX, "Workspace", "storage", "NLP")
DEVICE_MAP = {"": [0, 1]}
DATA_DIR = "data"
DEVICE_MAP = {"": 0}
# Path: consistency/data_consistency.py
def load_data_consistency(args):
if args.dataset_name == "liveqa":
examples = load_LiveQA(language=args.target_language)
elif args.dataset_name == "medicationqa":
examples = load_MedicationQA(language=args.target_language)
elif args.dataset_name == "healthqa":
examples = load_HealthQA(split=args.split,
language=args.target_language)
else:
raise NotImplementedError
return examples
# Path: consistency/data_consistency.py
def load_results_consistency(args):
path = get_consistency_results_path(args)
if osp.exists(path):
results_df = pd.read_excel(path)
print(f"Loaded {len(results_df)} examples from {path}")
else:
results_df = pd.DataFrame()
return results_df
# Path: consistency/data_consistency.py
def get_consistency_results_path(args):
if args.model != "gpt35":
model_prefix = f"{args.model}_"
else:
model_prefix = ""
if args.dataset_name in ["liveqa", "medicationqa"]:
path = osp.join(args.output_dir, "consistency",
f"{model_prefix}{args.dataset_name}_consistency_temp{args.temperature}.xlsx")
elif args.dataset_name in ["healthqa"]:
path = osp.join(args.output_dir, "consistency",
f"{model_prefix}{args.dataset_name}_{args.split}_consistency_temp{args.temperature}.xlsx")
else:
raise NotImplementedError
return path
# Path: consistency/consistency_get_medalpaca_answer.py
import os
import os.path as osp
import re
import string
import sys
import traceback
import torch
import pandas as pd
import const
from tqdm import trange
from consistency.Medalpaca.model_medalpaca import init_medalpaca_model
from arguments import args
from consistency.data_consistency import load_data_consistency, \
load_results_consistency, get_consistency_results_path
from setup import project_setup
from consistency.Medalpaca.params_medalpaca import *
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
if osp.exists(const.HOME_DIR_LINUX):
cuda_path = "/usr/local/cuda-11.7/bin/nvcc"
if "LD_LIBRARY_PATH" in os.environ:
os.environ["LD_LIBRARY_PATH"] += f"{cuda_path}"
else:
os.environ["LD_LIBRARY_PATH"] = cuda_path
def format_question(d):
question = d["question"]
options = d["options"]
for k, v in options.items():
question += f"\n{k}: {v}"
return question
def strip_special_chars(input_str):
"Remove special characters from string start/end"
if not input_str:
return input_str
start_index = 0
end_index = len(input_str) - 1
while start_index < len(input_str) and input_str[
start_index] not in string.ascii_letters + string.digits:
start_index += 1
while end_index >= 0 and input_str[
end_index] not in string.ascii_letters + string.digits:
end_index -= 1
if start_index <= end_index:
return input_str[start_index:end_index + 1]
else:
return ""
def starts_with_capital_letter(input_str):
"""
The answers should start like this:
'A: '
'A. '
'A '
"""
pattern = r'^[A-Z](:|\.|) .+'
return bool(re.match(pattern, input_str))
def run_consistency_medalpaca():
path = get_consistency_results_path(args)
model = init_medalpaca_model(args)
sampling['temperature'] = args.temperature
examples = load_data_consistency(args)
| results_df = load_results_consistency(args) |
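The two string helpers defined above can be exercised directly; a small illustrative sanity check:

print(strip_special_chars("  **A: take with food.** "))  # -> "A: take with food"
print(starts_with_capital_letter("A: take with food"))   # -> True
print(starts_with_capital_letter("answer: unclear"))     # -> False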
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: vtuber-plan/olah
# Path: olah/configs.py
class OlahConfig(object):
def __init__(self, path: Optional[str] = None) -> None:
# basic
self.host = "localhost"
self.port = 8090
self.ssl_key = None
self.ssl_cert = None
self.repos_path = "./repos"
self.hf_url = "https://huggingface.co"
self.hf_lfs_url = "https://cdn-lfs.huggingface.co"
self.mirror_url = "http://localhost:8090"
self.mirror_lfs_url = "http://localhost:8090"
# accessibility
self.offline = True
self.proxy = OlahRuleList.from_list(DEFAULT_PROXY_RULES)
self.cache = OlahRuleList.from_list(DEFAULT_CACHE_RULES)
if path is not None:
self.read_toml(path)
def empty_str(self, s: str) -> Optional[str]:
if s == "":
return None
else:
return s
def read_toml(self, path: str):
config = toml.load(path)
if "basic" in config:
basic = config["basic"]
self.host = basic.get("host", self.host)
self.port = basic.get("port", self.port)
self.ssl_key = self.empty_str(basic.get("ssl-key", self.ssl_key))
self.ssl_cert = self.empty_str(basic.get("ssl-cert", self.ssl_cert))
self.repos_path = basic.get("repos-path", self.repos_path)
self.hf_url = basic.get("hf-url", self.hf_url)
self.hf_lfs_url = basic.get("hf-lfs-url", self.hf_lfs_url)
self.mirror_url = basic.get("mirror-url", self.mirror_url)
self.mirror_lfs_url = basic.get("mirror-lfs-url", self.mirror_lfs_url)
if "accessibility" in config:
accessibility = config["accessibility"]
self.offline = accessibility.get("offline", self.offline)
self.proxy = OlahRuleList.from_list(accessibility.get("proxy", self.proxy))
self.cache = OlahRuleList.from_list(accessibility.get("cache", self.cache))
# Path: olah/constants.py
CHUNK_SIZE = 4096
# Path: olah/constants.py
WORKER_API_TIMEOUT = 15
# Path: olah/utls.py
async def check_cache_rules_hf(app, repo_type: Literal["model", "dataset", "space"], org: str, repo: str) -> bool:
config: OlahConfig = app.app_settings.config
return config.cache.allow(f"{org}/{repo}")
# Path: olah/meta.py
import os
import shutil
import tempfile
import httpx
from typing import Dict, Literal
from fastapi import FastAPI, Request
from olah.configs import OlahConfig
from olah.constants import CHUNK_SIZE, WORKER_API_TIMEOUT
from olah.utls import check_cache_rules_hf
async def meta_cache_generator(app: FastAPI, save_path: str):
yield {}
with open(save_path, "rb") as f:
while True:
chunk = f.read(CHUNK_SIZE)
if not chunk:
break
yield chunk
async def meta_proxy_generator(app: FastAPI, headers: Dict[str, str], meta_url: str, allow_cache: bool, save_path: str):
try:
temp_file_path = None
async with httpx.AsyncClient() as client:
with tempfile.NamedTemporaryFile(mode="wb", delete=False) as temp_file:
if not allow_cache:
temp_file = open(os.devnull, 'wb')
async with client.stream(
method="GET", url=meta_url,
headers=headers,
timeout=WORKER_API_TIMEOUT,
) as response:
response_headers = response.headers
yield response_headers
async for raw_chunk in response.aiter_raw():
if not raw_chunk:
continue
temp_file.write(raw_chunk)
yield raw_chunk
if not allow_cache:
temp_file_path = None
else:
temp_file_path = temp_file.name
if temp_file_path is not None:
shutil.copyfile(temp_file_path, save_path)
finally:
if temp_file_path is not None:
os.remove(temp_file_path)
async def meta_generator(app: FastAPI, repo_type: Literal["model", "dataset"], org: str, repo: str, commit: str, request: Request):
headers = {k: v for k, v in request.headers.items()}
headers.pop("host")
# save
repos_path = app.app_settings.repos_path
save_dir = os.path.join(repos_path, f"api/{repo_type}s/{org}/{repo}/revision/{commit}")
save_path = os.path.join(save_dir, "meta.json")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
use_cache = os.path.exists(save_path)
| allow_cache = await check_cache_rules_hf(app, repo_type, org, repo) |
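A plausible continuation of meta_generator, dispatching to the cache or proxy generators defined above, is sketched here; the upstream URL layout is an assumption rather than the repository's actual code.

config: OlahConfig = app.app_settings.config
if use_cache:
    generator = meta_cache_generator(app, save_path)
else:
    # URL layout assumed for this sketch
    meta_url = f"{config.hf_url}/api/{repo_type}s/{org}/{repo}/revision/{commit}"
    generator = meta_proxy_generator(app, headers, meta_url, allow_cache, save_path)
async for chunk in generator:
    yield chunk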
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RF-Tar-Railt/satori-python
# Path: src/satori/model.py
class Event:
id: int
type: str
platform: str
self_id: str
timestamp: datetime
argv: Optional[ArgvInteraction] = None
button: Optional[ButtonInteraction] = None
channel: Optional[Channel] = None
guild: Optional[Guild] = None
login: Optional[Login] = None
member: Optional[Member] = None
message: Optional[MessageObject] = None
operator: Optional[User] = None
role: Optional[Role] = None
user: Optional[User] = None
@classmethod
def parse(cls, raw: dict):
data = {
"id": raw["id"],
"type": raw["type"],
"platform": raw["platform"],
"self_id": raw["self_id"],
"timestamp": datetime.fromtimestamp(int(raw["timestamp"]) / 1000),
}
if "argv" in raw:
data["argv"] = ArgvInteraction(**raw["argv"])
if "button" in raw:
data["button"] = ButtonInteraction(**raw["button"])
if "channel" in raw:
data["channel"] = Channel.parse(raw["channel"])
if "guild" in raw:
data["guild"] = Guild.parse(raw["guild"])
if "login" in raw:
data["login"] = Login.parse(raw["login"])
if "member" in raw:
data["member"] = Member.parse(raw["member"])
if "message" in raw:
data["message"] = MessageObject.parse(raw["message"])
if "operator" in raw:
data["operator"] = User.parse(raw["operator"])
if "role" in raw:
data["role"] = Role.parse(raw["role"])
if "user" in raw:
data["user"] = User.parse(raw["user"])
return cls(**data)
def dump(self):
res = {
"id": self.id,
"type": self.type,
"platform": self.platform,
"self_id": self.self_id,
"timestamp": int(self.timestamp.timestamp() * 1000),
}
if self.argv:
res["argv"] = self.argv.dump()
if self.button:
res["button"] = self.button.dump()
if self.channel:
res["channel"] = self.channel.dump()
if self.guild:
res["guild"] = self.guild.dump()
if self.login:
res["login"] = self.login.dump()
if self.member:
res["member"] = self.member.dump()
if self.message:
res["message"] = self.message.dump()
if self.operator:
res["operator"] = self.operator.dump()
if self.role:
res["role"] = self.role.dump()
if self.user:
res["user"] = self.user.dump()
return res
# Path: src/satori/model.py
class Login:
status: LoginStatus
user: Optional[User] = None
self_id: Optional[str] = None
platform: Optional[str] = None
@classmethod
def parse(cls, raw: dict):
data = raw.copy()
if "user" in raw:
data["user"] = User(**raw["user"])
data["status"] = LoginStatus(data["status"])
return cls(**data)
def dump(self):
res: Dict[str, Any] = {"status": self.status.value}
if self.user:
res["user"] = self.user.dump()
if self.self_id:
res["self_id"] = self.self_id
if self.platform:
res["platform"] = self.platform
return res
# Path: src/satori/server/model.py
class Request(Generic[TA]):
headers: dict[str, Any]
action: TA
params: Any
# Path: src/satori/server/adapter.py
from abc import abstractmethod
from typing import Any, AsyncIterator, Dict, List
from launart import Service
from satori.const import Api
from ..model import Event, Login
from .model import Request
class Adapter(Service):
@abstractmethod
def get_platform(self) -> str:
...
@abstractmethod
def publisher(self) -> AsyncIterator[Event]:
...
@abstractmethod
def validate_headers(self, headers: Dict[str, Any]) -> bool:
...
@abstractmethod
def authenticate(self, token: str) -> bool:
...
@abstractmethod
| async def get_logins(self) -> List[Login]: |
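To illustrate the contract, a minimal hypothetical adapter filling in just the abstract methods shown above could look like this; a real launart Service would also need its lifecycle members, which are omitted here.

class DummyAdapter(Adapter):
    def get_platform(self) -> str:
        return "dummy"

    async def publisher(self) -> AsyncIterator[Event]:
        # a real adapter would yield Event objects received from its platform
        if False:
            yield  # keeps this a (never-yielding) async generator

    def validate_headers(self, headers: Dict[str, Any]) -> bool:
        return "Authorization" in headers

    def authenticate(self, token: str) -> bool:
        return token == "expected-token"

    async def get_logins(self) -> List[Login]:
        return []  # no active logins in this placeholder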