ref: string (lengths 4 to 432)
prompt: string (lengths 1.74k to 34.3k)
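Each row below pairs a "prompt" (repository context that is cut off mid-file and prefixed with the instruction to predict exactly one more line) with a "ref" (the ground-truth next line of the current file). As a rough illustration of how such ref/prompt pairs might be consumed, the following is a minimal Python sketch of an exact-match evaluation loop; it is not taken from any repository shown here. The "ref" and "prompt" field names come from the schema above, and generate_next_line is a hypothetical placeholder for whatever model call produces the single-line completion.

from typing import Callable, Iterable


def evaluate_next_line(rows: Iterable[dict],
                       generate_next_line: Callable[[str], str]) -> float:
    """Return exact-match accuracy of single-line completions against 'ref'."""
    total = correct = 0
    for row in rows:
        # The model sees only the prompt and must emit a single line of code.
        predicted = generate_next_line(row["prompt"]).strip()
        total += 1
        if predicted == row["ref"].strip():
            correct += 1
    return correct / total if total else 0.0


# Tiny usage example with a stand-in "model" that always predicts 'pass'.
if __name__ == "__main__":
    sample_rows = [
        {"prompt": "def noop():\n", "ref": "    pass"},
        {"prompt": "class Empty:\n", "ref": "    pass"},
    ]
    print(evaluate_next_line(sample_rows, lambda prompt: "    pass"))  # 1.0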
class BlipImageBaseProcessor(BaseProcessor):
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: DLYuanGod/TinyGPT-V # Path: minigpt4/common/registry.py class Registry: def register_builder(cls, name): def wrap(builder_cls): def register_task(cls, name): def wrap(task_cls): def register_model(cls, name): def wrap(model_cls): def register_processor(cls, name): def wrap(processor_cls): def register_lr_scheduler(cls, name): def wrap(lr_sched_cls): def register_runner(cls, name): def wrap(runner_cls): def register_path(cls, name, path): def register(cls, name, obj): def get_builder_class(cls, name): def get_model_class(cls, name): def get_task_class(cls, name): def get_processor_class(cls, name): def get_lr_scheduler_class(cls, name): def get_runner_class(cls, name): def list_runners(cls): def list_models(cls): def list_tasks(cls): def list_processors(cls): def list_lr_schedulers(cls): def list_datasets(cls): def get_path(cls, name): def get(cls, name, default=None, no_warning=False): def unregister(cls, name): # Path: minigpt4/processors/base_processor.py class BaseProcessor: def __init__(self): self.transform = lambda x: x return def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): return cls() def build(self, **kwargs): cfg = OmegaConf.create(kwargs) return self.from_config(cfg) # Path: minigpt4/processors/randaugment.py class RandomAugment(object): def __init__(self, N=2, M=10, isPIL=False, augs=[]): self.N = N self.M = M self.isPIL = isPIL if augs: self.augs = augs else: self.augs = list(arg_dict.keys()) def get_random_ops(self): sampled_ops = np.random.choice(self.augs, self.N) return [(op, 0.5, self.M) for op in sampled_ops] def __call__(self, img): if self.isPIL: img = np.array(img) ops = self.get_random_ops() for name, prob, level in ops: if np.random.random() > prob: continue args = arg_dict[name](level) img = func_dict[name](img, *args) return img # Path: minigpt4/processors/blip_processors.py import re from minigpt4.common.registry import registry from minigpt4.processors.base_processor import BaseProcessor from minigpt4.processors.randaugment import RandomAugment from omegaconf import OmegaConf from torchvision import transforms from torchvision.transforms.functional import InterpolationMode """ Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """
rs = tool.runffmpeg(params)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: jianchang512/vocal-separate # Path: vocal/cfg.py LANG = "en" if locale.getdefaultlocale()[0].split('_')[0].lower() != 'zh' else "zh" ROOT_DIR = os.getcwd() MODEL_DIR = os.path.join(ROOT_DIR, 'pretrained_models') STATIC_DIR = os.path.join(ROOT_DIR, 'static') TMP_DIR = os.path.join(STATIC_DIR, 'tmp') FILES_DIR = os.path.join(STATIC_DIR, 'files') # Path: vocal/tool.py def runffmpeg(arg): def checkupdate(): def openweb(web_address): # Path: vocal/cfg.py ROOT_DIR = os.getcwd() # Path: start.py import logging import threading import sys import os import subprocess from flask import Flask, request, render_template, jsonify, send_from_directory from gevent.pywsgi import WSGIServer, WSGIHandler,LoggingLogAdapter from logging.handlers import RotatingFileHandler from vocal import cfg, tool from vocal.cfg import ROOT_DIR from spleeter.separator import Separator class CustomRequestHandler(WSGIHandler): def log_request(self): pass # 禁用 Werkzeug 默认的日志处理器 log = logging.getLogger('werkzeug') log.handlers[:] = [] log.setLevel(logging.WARNING) app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static', template_folder=os.path.join(ROOT_DIR, 'templates')) root_log = logging.getLogger() # Flask的根日志记录器 root_log.handlers = [] root_log.setLevel(logging.WARNING) # 配置日志 app.logger.setLevel(logging.WARNING) # 设置日志级别为 INFO # 创建 RotatingFileHandler 对象,设置写入的文件路径和大小限制 file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'vocal.log'), maxBytes=1024 * 1024, backupCount=5) # 创建日志的格式 formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # 设置文件处理器的级别和格式 file_handler.setLevel(logging.WARNING) file_handler.setFormatter(formatter) # 将文件处理器添加到日志记录器中 app.logger.addHandler(file_handler) @app.route('/static/<path:filename>') def static_files(filename): return send_from_directory(app.config['STATIC_FOLDER'], filename) @app.route('/') def index(): return render_template("index.html",cuda=cfg.cuda, language=cfg.LANG,root_dir=ROOT_DIR.replace('\\', '/')) # 上传音频 @app.route('/upload', methods=['POST']) def upload(): try: # 获取上传的文件 audio_file = request.files['audio'] # 如果是mp4 noextname, ext = os.path.splitext(audio_file.filename) ext = ext.lower() # 如果是视频,先分离 wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav') if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0: return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)}) msg="" if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']: video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}') audio_file.save(video_file) params = [ "-i", video_file, ] if ext not in ['.mp3', '.flac']: params.append('-vn') params.append(wav_file)
self.layers = _get_clones(decoder_layer, num_layers)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ali-vilab/dreamtalk # Path: core/networks/transformer.py def _get_activation_fn(activation): """Return an activation function given a string""" if activation == "relu": return F.relu if activation == "gelu": return F.gelu if activation == "glu": return F.glu raise RuntimeError(F"activation should be relu/gelu, not {activation}.") # Path: core/networks/transformer.py def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) # Path: core/networks/dynamic_linear.py class DynamicLinear(nn.Module): def __init__(self, in_planes, out_planes, cond_planes, bias=True, K=4, temperature=30, ratio=4, init_weight=True): super().__init__() self.dynamic_conv = DynamicConv( in_planes, out_planes, cond_planes, kernel_size=1, stride=1, padding=0, bias=bias, K=K, ratio=ratio, temperature=temperature, init_weight=init_weight, ) def forward(self, x, cond): """ Args: x (_type_): (L, B, C_in) cond (_type_): (B, C_style) Returns: _type_: (L, B, C_out) """ x = x.permute(1, 2, 0).unsqueeze(-1) out = self.dynamic_conv(x, cond) # (B, C_out, L, 1) out = out.squeeze().permute(2, 0, 1) return out # Path: core/networks/dynamic_fc_decoder.py import torch.nn as nn import torch from core.networks.transformer import _get_activation_fn, _get_clones from core.networks.dynamic_linear import DynamicLinear class DynamicFCDecoderLayer(nn.Module): def __init__( self, d_model, nhead, d_style, dynamic_K, dynamic_ratio, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, ): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) # Implementation of Feedforward model # self.linear1 = nn.Linear(d_model, dim_feedforward) self.linear1 = DynamicLinear(d_model, dim_feedforward, d_style, K=dynamic_K, ratio=dynamic_ratio) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) # self.linear2 = DynamicLinear(dim_feedforward, d_model, d_style, K=dynamic_K, ratio=dynamic_ratio) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.norm3 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def with_pos_embed(self, tensor, pos): return tensor if pos is None else tensor + pos def forward_post( self, tgt, memory, style, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None, pos=None, query_pos=None, ): # q = k = self.with_pos_embed(tgt, query_pos) tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] tgt = tgt + self.dropout1(tgt2) tgt = self.norm1(tgt) tgt2 = self.multihead_attn( query=tgt, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask )[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) # tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style))), style) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt, style)))) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return 
tgt # def forward_pre( # self, # tgt, # memory, # tgt_mask=None, # memory_mask=None, # tgt_key_padding_mask=None, # memory_key_padding_mask=None, # pos=None, # query_pos=None, # ): # tgt2 = self.norm1(tgt) # # q = k = self.with_pos_embed(tgt2, query_pos) # tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] # tgt = tgt + self.dropout1(tgt2) # tgt2 = self.norm2(tgt) # tgt2 = self.multihead_attn( # query=tgt2, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask # )[0] # tgt = tgt + self.dropout2(tgt2) # tgt2 = self.norm3(tgt) # tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) # tgt = tgt + self.dropout3(tgt2) # return tgt def forward( self, tgt, memory, style, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None, pos=None, query_pos=None, ): if self.normalize_before: raise NotImplementedError # return self.forward_pre( # tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos # ) return self.forward_post( tgt, memory, style, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos ) class DynamicFCDecoder(nn.Module): def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): super().__init__()
linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: jiawei-ren/dreamgaussian4d # Path: diffusers/src/diffusers/utils/constants.py USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version # Path: diffusers/src/diffusers/models/lora.py class LoRACompatibleLinear(nn.Linear): """ A Linear layer that can be used with LoRA. """ def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs): super().__init__(*args, **kwargs) self.lora_layer = lora_layer def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]): self.lora_layer = lora_layer def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False): if self.lora_layer is None: return dtype, device = self.weight.data.dtype, self.weight.data.device w_orig = self.weight.data.float() w_up = self.lora_layer.up.weight.data.float() w_down = self.lora_layer.down.weight.data.float() if self.lora_layer.network_alpha is not None: w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) if safe_fusing and torch.isnan(fused_weight).any().item(): raise ValueError( "This LoRA weight seems to be broken. " f"Encountered NaN values when trying to fuse LoRA weights for {self}." "LoRA weights will not be fused." ) self.weight.data = fused_weight.to(device=device, dtype=dtype) # we can drop the lora layer now self.lora_layer = None # offload the up and down matrices to CPU to not blow the memory self.w_up = w_up.cpu() self.w_down = w_down.cpu() self._lora_scale = lora_scale def _unfuse_lora(self): if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None): return fused_weight = self.weight.data dtype, device = fused_weight.dtype, fused_weight.device w_up = self.w_up.to(device=device).float() w_down = self.w_down.to(device).float() unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]) self.weight.data = unfused_weight.to(device=device, dtype=dtype) self.w_up = None self.w_down = None def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor: if self.lora_layer is None: out = super().forward(hidden_states) return out else: out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states)) return out # Path: diffusers/src/diffusers/models/activations.py import torch import torch.nn.functional as F from torch import nn from ..utils import USE_PEFT_BACKEND from .lora import LoRACompatibleLinear # coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
ACTIVATION_FUNCTIONS = { "swish": nn.SiLU(), "silu": nn.SiLU(), "mish": nn.Mish(), "gelu": nn.GELU(), "relu": nn.ReLU(), } def get_activation(act_fn: str) -> nn.Module: """Helper function to get activation function from string. Args: act_fn (str): Name of activation function. Returns: nn.Module: Activation function. """ act_fn = act_fn.lower() if act_fn in ACTIVATION_FUNCTIONS: return ACTIVATION_FUNCTIONS[act_fn] else: raise ValueError(f"Unsupported activation function: {act_fn}") class GELU(nn.Module): r""" GELU activation function with tanh approximation support with `approximate="tanh"`. Parameters: dim_in (`int`): The number of channels in the input. dim_out (`int`): The number of channels in the output. approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation. """ def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"): super().__init__() self.proj = nn.Linear(dim_in, dim_out) self.approximate = approximate def gelu(self, gate: torch.Tensor) -> torch.Tensor: if gate.device.type != "mps": return F.gelu(gate, approximate=self.approximate) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype) def forward(self, hidden_states): hidden_states = self.proj(hidden_states) hidden_states = self.gelu(hidden_states) return hidden_states class GEGLU(nn.Module): r""" A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function. Parameters: dim_in (`int`): The number of channels in the input. dim_out (`int`): The number of channels in the output. """ def __init__(self, dim_in: int, dim_out: int): super().__init__()
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Meituan-AutoML/MobileVLM # Path: mobilevlm/model/vision_encoder.py def build_vision_tower(model_cfg, **kwargs): vision_tower = getattr(model_cfg, 'mm_vision_tower', getattr(model_cfg, 'vision_tower', None)) is_absolute_path_exists = os.path.exists(vision_tower) if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"): vision_tower_type = getattr(model_cfg, 'vision_tower_type', None) if vision_tower_type == "clip": return CLIPVisionTower(vision_tower, args=model_cfg, **kwargs) raise ValueError(f'Unknown vision tower: {vision_tower}') # Path: mobilevlm/model/vision_projector.py def build_vision_projector(config, delay_load=False, **kwargs): projector_type = getattr(config, 'mm_projector_type', 'linear') if projector_type == 'linear': return nn.Linear(config.mm_hidden_size, config.hidden_size) elif projector_type.startswith('mlp'): mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) if mlp_gelu_match: mlp_depth = int(mlp_gelu_match.group(1)) modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] for _ in range(1, mlp_depth): modules.append(nn.GELU()) modules.append(nn.Linear(config.hidden_size, config.hidden_size)) return nn.Sequential(*modules) elif projector_type.startswith('ldpnet'): return LDPNetProjector(config) raise ValueError(f'Unknown projector type: {projector_type}') # Path: mobilevlm/constants.py IGNORE_INDEX = -100 # Path: mobilevlm/constants.py IMAGE_TOKEN_INDEX = -200 # Path: mobilevlm/constants.py DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>" # Path: mobilevlm/constants.py DEFAULT_IM_START_TOKEN = "<im_start>" # Path: mobilevlm/constants.py DEFAULT_IM_END_TOKEN = "<im_end>" # Path: mobilevlm/model/mobilevlm.py import torch import torch.nn as nn from abc import ABC, abstractmethod from transformers import AutoTokenizer, BitsAndBytesConfig from mobilevlm.model.vision_encoder import build_vision_tower from mobilevlm.model.vision_projector import build_vision_projector from mobilevlm.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, \ DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from mobilevlm.model.mobilellama import MobileLlamaForCausalLM class MobileVLMMetaModel: def __init__(self, config): super(MobileVLMMetaModel, self).__init__(config) if hasattr(config, "mm_vision_tower"): self.vision_tower = build_vision_tower(config, delay_load=False) self.mm_projector = build_vision_projector(config) def get_vision_tower(self): vision_tower = getattr(self, 'vision_tower', None) if type(vision_tower) is list: vision_tower = vision_tower[0] return vision_tower def initialize_vision_modules(self, model_args, fsdp=None): mm_vision_select_layer = model_args.mm_vision_select_layer mm_vision_select_feature = model_args.mm_vision_select_feature pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter self.config.mm_vision_tower = model_args.vision_tower self.config.use_mm_proj = True self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') self.config.mm_vision_select_layer = mm_vision_select_layer self.config.mm_vision_select_feature = mm_vision_select_feature # Build VisionTower vision_tower = build_vision_tower(model_args) if fsdp is not None and 
len(fsdp) > 0: self.vision_tower = [vision_tower] else: self.vision_tower = vision_tower self.config.mm_hidden_size = vision_tower.hidden_size # Build Vision-Projector self.mm_projector = build_vision_projector(self.config) if pretrain_mm_mlp_adapter is not None: mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') def get_w(weights, keyword): return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) class MobileVLMMetaForCausalLM(ABC): @abstractmethod def get_model(self): pass def get_vision_tower(self): return self.get_model().get_vision_tower() def encode_images(self, images): image_features = self.get_model().get_vision_tower()(images) image_features = self.get_model().mm_projector(image_features) return image_features def prepare_inputs_labels_for_multimodal( self, input_ids, attention_mask, past_key_values, labels, images ): vision_tower = self.get_vision_tower() if vision_tower is None or images is None or input_ids.shape[1] == 1: if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1: attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) return input_ids, attention_mask, past_key_values, None, labels if type(images) is list or images.ndim == 5: concat_images = torch.cat([image for image in images], dim=0) image_features = self.encode_images(concat_images) split_sizes = [image.shape[0] for image in images] image_features = torch.split(image_features, split_sizes, dim=0) image_features = [x.flatten(0, 1) for x in image_features] else: image_features = self.encode_images(images) new_input_embeds = [] new_labels = [] if labels is not None else None cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids):
configs = (lora_config, llama_adapter_config, prefix_config)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: kinggongzilla/ai-clone-whatsapp # Path: configs/datasets.py class custom_dataset: # Path: configs/peft.py class lora_config: r: int=8 lora_alpha: int=32 target_modules: List[str] = field(default_factory=lambda: ["q_proj", "v_proj"]) bias= "none" task_type: str= "CAUSAL_LM" lora_dropout: float=0.05 inference_mode: bool = False # Path: configs/peft.py class llama_adapter_config: adapter_len: int= 10 adapter_layers: int= 30 task_type: str= "CAUSAL_LM" # Path: configs/peft.py class prefix_config: num_virtual_tokens: int=30 task_type: str= "CAUSAL_LM" # Path: configs/training.py class train_config: whatsapp_username: str="" # your own whatsapp user name as it is in the chat .txt files model_name: str="mistralai/Mistral-7B-Instruct-v0.2" enable_fsdp: bool=False low_cpu_fsdp: bool=False run_validation: bool=False batch_size_training: int=1 batching_strategy: str="packing" #alternative: padding context_length: int=4096 gradient_accumulation_steps: int=1 gradient_clipping: bool = False gradient_clipping_threshold: float = 1.0 num_epochs: int=1 num_workers_dataloader: int=1 lr: float=1e-4 weight_decay: float=0.0 gamma: float= 0.85 seed: int=42 use_fp16: bool=True mixed_precision: bool=True val_batch_size: int=1 dataset = "custom_dataset" data_dir: str = "data/preprocessing/processed_chats" peft_method: str = "lora" # None , llama_adapter, prefix use_peft: bool=True output_dir: str = "checkpoints" freeze_layers: bool = False num_freeze_layers: int = 1 quantization: bool = True one_gpu: bool = False save_model: bool = True dist_checkpoint_root_folder: str="PATH/to/save/FSDP/model" # will be used if using FSDP dist_checkpoint_folder: str="fine-tuned" # will be used if using FSDP save_optimizer: bool=False # will be used if using FSDP use_fast_kernels: bool = False # Enable using SDPA from PyTroch Accelerated Transformers, make use Flash Attention and Xformer memory-efficient kernels # Path: data/sampler.py class LengthBasedBatchSampler(torch.utils.data.BatchSampler): def __init__(self, data_source, batch_size: int, drop_last: bool, shuffle: bool=True) -> None: if isinstance(next(iter(data_source)), dict): first_key = next(iter(next(iter(data_source)).keys())) self.lengths = [len(d[first_key]) for d in data_source] else: self.lengths = [len(d) for d in data_source] self.batch_size = batch_size self.drop_last = drop_last self.shuffle = shuffle def __iter__(self): ids = np.argsort(self.lengths) if self.drop_last: ids = ids[:len(ids) // self.batch_size * self.batch_size] batches = [ids[i:i+self.batch_size] for i in range(0, len(ids), self.batch_size)] if self.shuffle: random.shuffle(batches) for b in batches: yield b def __len__(self): if self.drop_last: return len(self.lengths) // self.batch_size else: return len(self.lengths) // self.batch_size + (len(self.lengths) % self.batch_size > 0) # Path: data/sampler.py class DistributedLengthBasedBatchSampler(torch.utils.data.BatchSampler): def __init__(self, data_source, batch_size: int, num_replicas: int, rank: int, shuffle: bool = True, seed: int = 0) -> None: random.seed(seed) self.batch_sampler = LengthBasedBatchSampler( data_source, batch_size=batch_size, drop_last=True, shuffle=shuffle ) self.num_replicas = num_replicas self.rank = 
rank def __iter__(self): max_length = len(self.batch_sampler) // self.num_replicas * self.num_replicas return islice(self.batch_sampler, self.rank, max_length, self.num_replicas) def __len__(self): return len(self.batch_sampler) // self.num_replicas # Path: utils/dataset_utils.py DATASET_PREPROC = { "custom_dataset": get_custom_dataset, } # Path: utils/config_utils.py import inspect import torch.distributed as dist from dataclasses import asdict from torch.utils.data import DistributedSampler from peft import ( LoraConfig, AdaptionPromptConfig, PrefixTuningConfig, ) from transformers import default_data_collator from transformers.data import DataCollatorForSeq2Seq from configs import datasets, lora_config, llama_adapter_config, prefix_config, train_config from data.sampler import LengthBasedBatchSampler, DistributedLengthBasedBatchSampler from utils.dataset_utils import DATASET_PREPROC # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. def update_config(config, **kwargs): if isinstance(config, (tuple, list)): for c in config: update_config(c, **kwargs) else: for k, v in kwargs.items(): if hasattr(config, k): setattr(config, k, v) elif "." in k: # allow --some_config.some_param=True config_name, param_name = k.split(".") if type(config).__name__ == config_name: if hasattr(config, param_name): setattr(config, param_name, v) else: # In case of specialized config we can warm user print(f"Warning: {config_name} does not accept parameter: {k}") elif isinstance(config, train_config): print(f"Warning: unknown parameter {k}") def generate_peft_config(train_config, kwargs):
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(bz_boxes), box_cxcywh_to_xyxy(bz_gtboxs))
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: FoundationVision/UniRef # Path: projects/UniRef/uniref/util/box_ops.py def box_cxcywh_to_xyxy(x): # print('box:\n', x) x_c, y_c, w, h = x.unbind(-1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=-1) # Path: projects/UniRef/uniref/util/box_ops.py def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check assert (boxes1[:, 2:] >= boxes1[:, :2]).all() assert (boxes2[:, 2:] >= boxes2[:, :2]).all() iou, union = box_iou(boxes1, boxes2) lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) wh = (rb - lt).clamp(min=0) # [N,M,2] area = wh[:, :, 0] * wh[:, :, 1] return iou - (area - union) / (area+1e-7) # Path: projects/UniRef/uniref/models/deformable_detr/matcher.py import torch import torch.nn.functional as F import torchvision.ops as ops from scipy.optimize import linear_sum_assignment from torch import nn from ...util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou # ------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # ------------------------------------------------------------------------ """ Modules to compute the matching cost and solve the corresponding LSAP. """ class HungarianMatcher(nn.Module): """This class computes an assignment between the targets and the predictions of the network For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). 
""" def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): """Creates the matcher Params: cost_class: This is the relative weight of the classification error in the matching cost cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost """ super().__init__() self.cost_class = cost_class self.cost_bbox = cost_bbox self.cost_giou = cost_giou assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0" def forward_ota(self, outputs, targets): """ simOTA for detr """ with torch.no_grad(): bs, num_queries = outputs["pred_logits"].shape[:2] out_prob = outputs["pred_logits"].sigmoid() out_bbox = outputs["pred_boxes"] # 跳过frame 维度 indices = [] matched_ids = [] for batch_idx in range(bs): bz_boxes = out_bbox[batch_idx] #[300,4] bz_out_prob = out_prob[batch_idx] bz_tgt_ids = targets[batch_idx]["labels"] num_insts = len(bz_tgt_ids) bz_gtboxs = targets[batch_idx]['boxes'].reshape(num_insts,4) #[num_gt, 4] fg_mask, is_in_boxes_and_center = \ self.get_in_boxes_info(bz_boxes,bz_gtboxs,expanded_strides=32) pair_wise_ious = ops.box_iou(box_cxcywh_to_xyxy(bz_boxes), box_cxcywh_to_xyxy(bz_gtboxs)) # pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log()) cost_class = pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]
self.encoding = get_encoding(3, self.cfg.dir_encoding_config)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: xhuangcv/humannorm # Path: threestudio/models/materials/base.py class BaseMaterial(BaseModule): @dataclass class Config(BaseModule.Config): pass cfg: Config requires_normal: bool = False requires_tangent: bool = False def configure(self): pass def forward(self, *args, **kwargs) -> Float[Tensor, "*B 3"]: raise NotImplementedError def export(self, *args, **kwargs) -> Dict[str, Any]: return {} # Path: threestudio/models/networks.py def get_encoding(n_input_dims: int, config) -> nn.Module: # input suppose to be range [0, 1] encoding: nn.Module if config.otype == "ProgressiveBandFrequency": encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config)) elif config.otype == "ProgressiveBandHashGrid": encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config)) else: encoding = TCNNEncoding(n_input_dims, config_to_primitive(config)) encoding = CompositeEncoding( encoding, include_xyz=config.get("include_xyz", False), xyz_scale=2.0, xyz_offset=-1.0, ) # FIXME: hard coded return encoding # Path: threestudio/models/networks.py def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module: network: nn.Module if config.otype == "VanillaMLP": network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config)) elif config.otype == "SphereInitVanillaMLP": network = SphereInitVanillaMLP( n_input_dims, n_output_dims, config_to_primitive(config) ) else: assert ( config.get("sphere_init", False) is False ), "sphere_init=True only supported by VanillaMLP" network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config)) return network # Path: threestudio/utils/ops.py def dot(x, y): return torch.sum(x * y, -1, keepdim=True) # Path: threestudio/utils/ops.py def get_activation(name) -> Callable: if name is None: return lambda x: x name = name.lower() if name == "none": return lambda x: x elif name == "lin2srgb": return lambda x: torch.where( x > 0.0031308, torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055, 12.92 * x, ).clamp(0.0, 1.0) elif name == "exp": return lambda x: torch.exp(x) elif name == "shifted_exp": return lambda x: torch.exp(x - 1.0) elif name == "trunc_exp": return trunc_exp elif name == "shifted_trunc_exp": return lambda x: trunc_exp(x - 1.0) elif name == "sigmoid": return lambda x: torch.sigmoid(x) elif name == "tanh": return lambda x: torch.tanh(x) elif name == "shifted_softplus": return lambda x: F.softplus(x - 1.0) elif name == "scale_-11_01": return lambda x: x * 0.5 + 0.5 else: try: return getattr(F, name) except AttributeError: raise ValueError(f"Unknown activation function: {name}") # Path: threestudio/models/materials/neural_radiance_material.py import random import torch import torch.nn as nn import torch.nn.functional as F import threestudio from dataclasses import dataclass, field from threestudio.models.materials.base import BaseMaterial from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import dot, get_activation from threestudio.utils.typing import * @threestudio.register("neural-radiance-material") class NeuralRadianceMaterial(BaseMaterial): @dataclass class Config(BaseMaterial.Config): input_feature_dims: int = 8 color_activation: str = 
"sigmoid" dir_encoding_config: dict = field( default_factory=lambda: {"otype": "SphericalHarmonics", "degree": 3} ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "FullyFusedMLP", "activation": "ReLU", "n_neurons": 16, "n_hidden_layers": 2, } ) cfg: Config def configure(self) -> None:
rs = tool.runffmpeg(params)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: jianchang512/stt # Path: stslib/cfg.py LANG = "en" if locale.getdefaultlocale()[0].split('_')[0].lower() != 'zh' else "zh" ROOT_DIR = os.getcwd() MODEL_DIR = os.path.join(ROOT_DIR, 'models') STATIC_DIR = os.path.join(ROOT_DIR, 'static') TMP_DIR = os.path.join(STATIC_DIR, 'tmp') # Path: stslib/tool.py def runffmpeg(arg): def checkupdate(): def openweb(web_address): def ms_to_time_string(*, ms=0, seconds=None): # Path: stslib/cfg.py ROOT_DIR = os.getcwd() # Path: start.py import logging import re import threading import sys import torch import os from flask import Flask, request, render_template, jsonify, send_from_directory from gevent.pywsgi import WSGIServer, WSGIHandler, LoggingLogAdapter from logging.handlers import RotatingFileHandler from stslib import cfg, tool from stslib.cfg import ROOT_DIR from faster_whisper import WhisperModel device = "cuda" if torch.cuda.is_available() else "cpu" class CustomRequestHandler(WSGIHandler): def log_request(self): pass # 配置日志 # 禁用 Werkzeug 默认的日志处理器 log = logging.getLogger('werkzeug') log.handlers[:] = [] log.setLevel(logging.WARNING) app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static', template_folder=os.path.join(ROOT_DIR, 'templates')) root_log = logging.getLogger() # Flask的根日志记录器 root_log.handlers = [] root_log.setLevel(logging.WARNING) # 配置日志 app.logger.setLevel(logging.WARNING) # 设置日志级别为 INFO # 创建 RotatingFileHandler 对象,设置写入的文件路径和大小限制 file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'sts.log'), maxBytes=1024 * 1024, backupCount=5) # 创建日志的格式 formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # 设置文件处理器的级别和格式 file_handler.setLevel(logging.WARNING) file_handler.setFormatter(formatter) # 将文件处理器添加到日志记录器中 app.logger.addHandler(file_handler) @app.route('/static/<path:filename>') def static_files(filename): return send_from_directory(app.config['STATIC_FOLDER'], filename) @app.route('/') def index(): return render_template("index.html", cuda=cfg.cuda, lang_code=cfg.lang_code, language=cfg.LANG, root_dir=ROOT_DIR.replace('\\', '/')) # 上传音频 @app.route('/upload', methods=['POST']) def upload(): try: # 获取上传的文件 audio_file = request.files['audio'] # 如果是mp4 noextname, ext = os.path.splitext(audio_file.filename) ext = ext.lower() # 如果是视频,先分离 wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav') if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0: return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)}) msg = "" if ext in ['.mp4', '.mov', '.avi', '.mkv', '.mpeg', '.mp3', '.flac']: video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}') audio_file.save(video_file) params = [ "-i", video_file, ] if ext not in ['.mp3', '.flac']: params.append('-vn') params.append(wav_file)
self.similar_filter = SimilarImageFilter()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: jesenzhang/ComfyUI_StreamDiffusion # Path: streamdiffusion/image_filter.py class SimilarImageFilter: def __init__(self, threshold: float = 0.98, max_skip_frame: float = 10) -> None: self.threshold = threshold self.prev_tensor = None self.cos = torch.nn.CosineSimilarity(dim=0, eps=1e-6) self.max_skip_frame = max_skip_frame self.skip_count = 0 def __call__(self, x: torch.Tensor) -> Optional[torch.Tensor]: if self.prev_tensor is None: self.prev_tensor = x.detach().clone() return x else: cos_sim = self.cos(self.prev_tensor.reshape(-1), x.reshape(-1)).item() sample = random.uniform(0, 1) if self.threshold >= 1: skip_prob = 0 else: skip_prob = max(0, 1 - (1 - cos_sim) / (1 - self.threshold)) # not skip frame if skip_prob < sample: self.prev_tensor = x.detach().clone() return x # skip frame else: if self.skip_count > self.max_skip_frame: self.skip_count = 0 self.prev_tensor = x.detach().clone() return x else: self.skip_count += 1 return None def set_threshold(self, threshold: float) -> None: self.threshold = threshold def set_max_skip_frame(self, max_skip_frame: float) -> None: self.max_skip_frame = max_skip_frame # Path: streamdiffusion/image_utils.py def postprocess_image( image: torch.Tensor, output_type: str = "pil", do_denormalize: Optional[List[bool]] = None, ) -> Union[torch.Tensor, np.ndarray, PIL.Image.Image]: if not isinstance(image, torch.Tensor): raise ValueError( f"Input for postprocessing is in incorrect format: {type(image)}. 
We only support pytorch tensor" ) if output_type == "latent": return image do_normalize_flg = True if do_denormalize is None: do_denormalize = [do_normalize_flg] * image.shape[0] image = torch.stack( [ denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0]) ] ) if output_type == "pt": return image image = pt_to_numpy(image) if output_type == "np": return image if output_type == "pil": return numpy_to_pil(image) # Path: streamdiffusion/pipeline.py import time import numpy as np import PIL.Image import torch from typing import List, Optional, Union, Any, Dict, Tuple, Literal from diffusers import LCMScheduler, StableDiffusionPipeline from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import ( retrieve_latents, ) from .image_filter import SimilarImageFilter from .image_utils import postprocess_image class StreamDiffusion: def __init__( self, pipe: StableDiffusionPipeline, t_index_list: List[int], torch_dtype: torch.dtype = torch.float16, width: int = 512, height: int = 512, do_add_noise: bool = True, use_denoising_batch: bool = True, frame_buffer_size: int = 1, cfg_type: Literal["none", "full", "self", "initialize"] = "self", ) -> None: self.device = pipe.device self.dtype = torch_dtype self.generator = None self.height = height self.width = width self.latent_height = int(height // pipe.vae_scale_factor) self.latent_width = int(width // pipe.vae_scale_factor) self.frame_bff_size = frame_buffer_size self.denoising_steps_num = len(t_index_list) self.cfg_type = cfg_type if use_denoising_batch: self.batch_size = self.denoising_steps_num * frame_buffer_size if self.cfg_type == "initialize": self.trt_unet_batch_size = ( self.denoising_steps_num + 1 ) * self.frame_bff_size elif self.cfg_type == "full": self.trt_unet_batch_size = ( 2 * self.denoising_steps_num * self.frame_bff_size ) else: self.trt_unet_batch_size = self.denoising_steps_num * frame_buffer_size else: self.trt_unet_batch_size = self.frame_bff_size self.batch_size = frame_buffer_size self.t_list = t_index_list self.do_add_noise = do_add_noise self.use_denoising_batch = use_denoising_batch self.similar_image_filter = False
for key, value in _state_dict(model).items():
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: neobundy/MLX-Stable-Diffusion-WebUI # Path: stable_diffusion/config.py class DiffuserModelPathConfig: class BaseConfig: class AutoencoderConfig(BaseConfig): class CLIPTextModelConfig(BaseConfig): class UNetConfig(BaseConfig): class DiffusionConfig(BaseConfig): def __init__(self, model_path: str = "./diffuser_models"): def unet_config(self): def unet(self): def scheduler(self): def text_encoder_config(self): def text_encoder(self): def vae_config(self): def vae(self): def diffusion_config(self): def tokenizer_vocab(self): def tokenizer_merges(self): def __getitem__(self, key): def __setitem__(self, key, value): # Path: stable_diffusion/model_io.py _DEBUG = False def _debug_print(*args, **kwargs): def _from_numpy(x): def map_unet_weights(key, value): def map_clip_text_encoder_weights(key, value): def map_vae_weights(key, value): def _flatten(params): def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False): def _check_key(key: str, part: str): def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False): def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False): def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False): def load_diffusion_config(key: str = _DEFAULT_MODEL): def load_tokenizer(key: str = _DEFAULT_MODEL): def load_unet_local(weights_path: str, config_path: str, float16: bool = False): def load_text_encoder_local(weights_path: str, config_path: str, float16: bool = False): def load_autoencoder_local(weights_path: str, config_path: str, float16: bool = False): def load_diffusion_config_local(config_path:str): def load_tokenizer_local(vocab_path: str, merges_path: str): def load_diffuser_model(diffuser_model_path: str, float16: bool = False): # Path: utils.py def _state_dict(model): """Return the model's state_dict as a dictionary.""" state_dict = {} for name, param in model.parameters().items(): state_dict[name] = param return state_dict # Path: utils.py def get_state_dict_from_safetensor(checkpoint_path: str): """Return the state_dict from the checkpoint.""" state_dict = {} with safetensor_open(checkpoint_path, framework="numpy") as f: # Access the data in the file for key in f.keys(): tensor = f.get_tensor(key) state_dict[key] = tensor return state_dict # Path: model_inspector.py from stable_diffusion.config import PathConfig from stable_diffusion.model_io import preload_models_from_safetensor_weights from utils import _state_dict from utils import get_state_dict_from_safetensor INSPECTION_FILE = "model_inspection.txt" NUM_ITEMS = 100 MODEL_FILE = "./models/v2-1_512-ema-pruned.safetensors" MODEL_FILE1 = "./unet/diffusion_pytorch_model_test.safetensors" MODEL_FILE2 = "./unet/xxmix9realistic_v40.safetensors" # Recreate the inspection file at every execution of the script with open(INSPECTION_FILE, 'w') as f: pass def write_to_file(*args, **kwargs): """Write the text to the inspection file.""" # Convert the arguments to a string message = ' '.join(map(str, args)) # Print the message to the console print(message, **kwargs) # Open the log file in append mode and write the message with open(INSPECTION_FILE, 'a') as f: f.write(message + '\n') def inspect_model(path_config: PathConfig, keys_only=True): 
"""Inspect the contents of the models.""" # Load the models using the provided config and weights paths unet_model = load_unet_local(path_config.unet_config, MODEL_FILE) text_encoder_model = load_text_encoder_local(MODEL_FILE) autoencoder_model = load_autoencoder_local(MODEL_FILE) diffusion_config = load_diffusion_config_local(path_config.diffusion_config) tokenizer = load_tokenizer_local(path_config.tokenizer_vocab, path_config.tokenizer_merges) # Convert the models' state_dict to a dictionary and iterate over it for model_name, model in zip(["unet", "text_encoder", "autoencoder"], [unet_model, text_encoder_model, autoencoder_model]): write_to_file("-" * 50) write_to_file(f"Model: {model_name}") write_to_file("-" * 50)
.where(meme_source.c.type == MemeSourceType.TELEGRAM)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ffmemes/ff-backend # Path: src/database.py DATABASE_URL = str(settings.DATABASE_URL) async def fetch_one(select_query: Select | Insert | Update) -> dict[str, Any] | None: async def fetch_all(select_query: Select | Insert | Update) -> list[dict[str, Any]]: async def execute(select_query: Insert | Update) -> CursorResult: # Path: src/storage/parsers/schemas.py class TgChannelPostParsingResult(CustomModel): post_id: int url: str content: str | None = None # post text media: list[dict] | None = None views: int date: datetime mentions: list[str] | None = None # mentioned usernames hashtags: list[str] | None = None forwarded: dict | None = None forwarded_url: str | None = None # url to forwarded post link_preview: dict | None = None out_links: list[str] | None = None # Path: src/storage/parsers/schemas.py class VkGroupPostParsingResult(CustomModel): post_id: str url: str content: str | None = None # post text media: list[str] date: datetime views: int likes: int reposts: int comments: int # Path: src/storage/constants.py class MemeSourceType(str, Enum): TELEGRAM = "telegram" VK = "vk" REDDIT = "reddit" INSTAGRAM = "instagram" TWITTER = "twitter" TIKTOK = "tiktok" USER_UPLOAD = "user upload" # Path: src/storage/constants.py class MemeSourceStatus(str, Enum): IN_MODERATION = "in_moderation" PARSING_ENABLED = "parsing_enabled" PARSING_DISABLED = "parsing_disabled" # Path: src/storage/constants.py class MemeType(str, Enum): IMAGE = "image" ANIMATION = "animation" VIDEO = "video" # Path: src/storage/constants.py class MemeStatus(str, Enum): CREATED = "created" OK = "ok" DUPLICATE = "duplicate" AD = "ad" BROKEN_CONTENT_LINK = "broken_content_link" # TODO: more statuses? 
# IN_MODERATION = "in_moderation" # Path: src/storage/constants.py MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT = "meme_raw_telegram_meme_source_id_post_id_key" # Path: src/storage/constants.py MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT = "meme_raw_vk_meme_source_id_post_id_key" # Path: src/storage/service.py from typing import Any from datetime import datetime from sqlalchemy import select, nulls_first, text from sqlalchemy.dialects.postgresql import insert from src.database import ( language, meme, meme_source, meme_raw_telegram, meme_raw_vk, execute, fetch_one, fetch_all, ) from src.storage.parsers.schemas import TgChannelPostParsingResult, VkGroupPostParsingResult from src.storage.constants import ( MemeSourceType, MemeSourceStatus, MemeType, MemeStatus, MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT, MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT, ) async def insert_parsed_posts_from_telegram( meme_source_id: int, telegram_posts: list[TgChannelPostParsingResult], ) -> None: posts = [ post.model_dump() | {"meme_source_id": meme_source_id} for post in telegram_posts ] insert_statement = insert(meme_raw_telegram).values(posts) insert_posts_query = insert_statement.on_conflict_do_update( constraint=MEME_RAW_TELEGRAM_MEME_SOURCE_POST_UNIQUE_CONSTRAINT, set_={ "media": insert_statement.excluded.media, "views": insert_statement.excluded.views, "updated_at": datetime.utcnow(), }, ) await execute(insert_posts_query) async def insert_parsed_posts_from_vk( meme_source_id: int, vk_posts: list[VkGroupPostParsingResult], ) -> None: posts = [ post.model_dump() | {"meme_source_id": meme_source_id} for post in vk_posts ] insert_statement = insert(meme_raw_vk).values(posts) insert_posts_query = insert_statement.on_conflict_do_update( constraint=MEME_RAW_VK_MEME_SOURCE_POST_UNIQUE_CONSTRAINT, set_={ "media": insert_statement.excluded.media, "views": insert_statement.excluded.views, "likes": insert_statement.excluded.likes, "reposts": insert_statement.excluded.reposts, "comments": insert_statement.excluded.comments, "updated_at": datetime.utcnow(), }, ) await execute(insert_posts_query) async def get_telegram_sources_to_parse(limit=10) -> list[dict[str, Any]]: select_query = ( select(meme_source)
self.target = encode_prompts(tokenizer, text_encoder, [target_prompt])
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Con6924/SPM # Path: src/misc/clip_templates.py # Path: src/engine/train_util.py def encode_prompts( tokenizer: CLIPTokenizer, text_encoder: CLIPTokenizer, prompts: list[str], return_tokens: bool = False, ): text_tokens = text_tokenize(tokenizer, prompts) text_embeddings = text_encode(text_encoder, text_tokens) if return_tokens: return text_embeddings, torch.unique(text_tokens, dim=1) return text_embeddings # Path: src/configs/prompt.py from typing import Literal, Optional, Union from pathlib import Path from pydantic import BaseModel, root_validator from transformers import CLIPTextModel, CLIPTokenizer from src.misc.clip_templates import imagenet_templates from src.engine.train_util import encode_prompts import yaml import pandas as pd import random import torch class PromptEmbedsXL: text_embeds: torch.FloatTensor pooled_embeds: torch.FloatTensor def __init__(self, embeds) -> None: self.text_embeds, self.pooled_embeds = embeds PROMPT_EMBEDDING = Union[torch.FloatTensor, PromptEmbedsXL] class PromptEmbedsCache: prompts: dict[str, PROMPT_EMBEDDING] = {} def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None: self.prompts[__name] = __value def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]: if __name in self.prompts: return self.prompts[__name] else: return None class PromptSettings(BaseModel): # yaml target: str positive: str = None # if None, target will be used unconditional: str = "" # default is "" neutral: str = None # if None, unconditional will be used action: ACTION_TYPES = "erase" # default is "erase" guidance_scale: float = 1.0 # default is 1.0 resolution: int = 512 # default is 512 dynamic_resolution: bool = False # default is False batch_size: int = 1 # default is 1 dynamic_crops: bool = False # default is False. 
only used when model is XL use_template: bool = False # default is False la_strength: float = 1000.0 sampling_batch_size: int = 4 seed: int = None case_number: int = 0 @root_validator(pre=True) def fill_prompts(cls, values): keys = values.keys() if "target" not in keys: raise ValueError("target must be specified") if "positive" not in keys: values["positive"] = values["target"] if "unconditional" not in keys: values["unconditional"] = "" if "neutral" not in keys: values["neutral"] = values["unconditional"] return values class PromptEmbedsPair: target: PROMPT_EMBEDDING # the concept that do not want to generate positive: PROMPT_EMBEDDING # generate the concept unconditional: PROMPT_EMBEDDING # uncondition (default should be empty) neutral: PROMPT_EMBEDDING # base condition (default should be empty) use_template: bool = False # use clip template or not guidance_scale: float resolution: int dynamic_resolution: bool batch_size: int dynamic_crops: bool loss_fn: torch.nn.Module action: ACTION_TYPES def __init__( self, loss_fn: torch.nn.Module, target: PROMPT_EMBEDDING, positive: PROMPT_EMBEDDING, unconditional: PROMPT_EMBEDDING, neutral: PROMPT_EMBEDDING, settings: PromptSettings, ) -> None: self.loss_fn = loss_fn self.target = target self.positive = positive self.unconditional = unconditional self.neutral = neutral self.settings = settings self.use_template = settings.use_template self.guidance_scale = settings.guidance_scale self.resolution = settings.resolution self.dynamic_resolution = settings.dynamic_resolution self.batch_size = settings.batch_size self.dynamic_crops = settings.dynamic_crops self.action = settings.action self.la_strength = settings.la_strength self.sampling_batch_size = settings.sampling_batch_size def _prepare_embeddings( self, cache: PromptEmbedsCache, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, ): """ Prepare embeddings for training. When use_template is True, the embeddings will be format using a template, and then be processed by the model. """ if not self.use_template: return template = random.choice(imagenet_templates) target_prompt = template.format(self.settings.target) if cache[target_prompt]: self.target = cache[target_prompt] else:
input_seqs = read_fasta_file(fasta_file)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: dakpinaroglu/Frame2seq # Path: frame2seq/utils/residue_constants.py def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]], def make_bond_key(atom1_name, atom2_name): def sequence_to_onehot( sequence: str, mapping: Mapping[str, int], ) -> np.ndarray: def _make_standard_atom_mask() -> np.ndarray: def _make_rigid_transformation_4x4(ex, ey, translation): AA_TO_ID = { 'A': 0, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6, 'I': 7, 'K': 8, 'L': 9, 'M': 10, 'N': 11, 'P': 12, 'Q': 13, 'R': 14, 'S': 15, 'T': 16, 'V': 17, 'W': 18, 'Y': 19, 'X': 20, } ID_TO_AA = { 0: 'A', 1: 'C', 2: 'D', 3: 'E', 4: 'F', 5: 'G', 6: 'H', 7: 'I', 8: 'K', 9: 'L', 10: 'M', 11: 'N', 12: 'P', 13: 'Q', 14: 'R', 15: 'S', 16: 'T', 17: 'V', 18: 'W', 19: 'Y', 20: 'X', } STANDARD_ATOM_MASK = _make_standard_atom_mask() # Path: frame2seq/utils/util.py def get_neg_pll(probs, seq): seq_probs = torch.gather(probs, 1, seq.unsqueeze(-1)).squeeze(-1) neg_pll = -1 * torch.log(seq_probs) avg_neg_pll = neg_pll.sum().item() / len(neg_pll) return neg_pll, avg_neg_pll # Path: frame2seq/utils/util.py def read_fasta_file(fasta_file): """ Read a fasta file and return a list of sequences. """ with open(fasta_file, 'r') as f: lines = f.readlines() sequences = [] for line in lines: if line[0] == '>': sequences.append(lines[lines.index(line) + 1].strip()) return sequences # Path: frame2seq/utils/pdb2input.py def get_inference_inputs(pdb_file, chain_id): atom_positions, aatype, seq_mask = get_parsed_inputs(pdb_file, chain_id) seq_mask = seq_mask.unsqueeze(0) aatype = torch.from_numpy(aatype) aatype = aatype.unsqueeze(0) X = atom_positions X = X.unsqueeze(0) return seq_mask, aatype, X # Path: frame2seq/utils/pred2output.py def output_csv(preds, csv_dir): """ Given average negative pseudo-log-likelihoods, write to a csv file. """ df = pd.DataFrame(columns=[ 'PDBID', 'Chain ID', 'Sample Number', 'Scored sequence', 'Average negative pseudo-log-likelihood', 'Temperature' ], data=preds) df.to_csv(f"{csv_dir}/scores.csv", index=False) # Path: frame2seq/utils/pred2output.py def output_indiv_csv(scores, csv_dir): """ Given per-residue negative pseudo-log-likelihoods, write to a csv file. 
""" pdbid = scores['pdbid'] chain = scores['chain'] sample = scores['sample'] res_idx = scores['res_idx'] neg_pll = scores['neg_pll'] df = pd.DataFrame( list(zip(res_idx, neg_pll)), columns=['Residue index', 'Negative pseudo-log-likelihood']) df.to_csv(f"{csv_dir}/{pdbid}_{chain}_seq{sample}.csv", index=False) # Path: frame2seq/utils/score.py import os import torch from tqdm import tqdm from frame2seq.utils import residue_constants from frame2seq.utils.util import get_neg_pll, read_fasta_file from frame2seq.utils.pdb2input import get_inference_inputs from frame2seq.utils.pred2output import output_csv, output_indiv_csv def score(self, pdb_file, chain_id, fasta_file, save_indiv_neg_pll): temperature = 1.0 seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id) seq_mask = seq_mask.to(self.device) aatype = aatype.to(self.device) X = X.to(self.device) str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]] input_aatype_onehot = residue_constants.sequence_to_onehot( sequence=str_form, mapping=residue_constants.AA_TO_ID, ) input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float() input_aatype_onehot = input_aatype_onehot.unsqueeze(0) input_aatype_onehot = input_aatype_onehot.to(self.device) input_aatype_onehot = torch.zeros_like(input_aatype_onehot) input_aatype_onehot[:, :, 20] = 1 # all positions are masked (set to unknown) scores, preds = {}, [] with torch.no_grad(): pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot) pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot) pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot) pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble pred_seq = pred_seq / temperature pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1) pred_seq = pred_seq[seq_mask] if fasta_file is not None:
self.push_screen(Main())
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: davep/oshit # Path: oshit/app/data/config.py @lru_cache(maxsize=None) def load_configuration() -> Configuration: """Load the configuration. Returns: The configuration. Note: As a side-effect, if the configuration doesn't exist a default one will be saved to storage. This function is designed so that it's safe and low-cost to repeatedly call it. The configuration is cached and will only be loaded from storage when necessary. """ source = configuration_file() return ( Configuration(**loads(source.read_text(encoding="utf-8"))) if source.exists() else save_configuration(Configuration()) ) # Path: oshit/app/data/config.py def save_configuration(configuration: Configuration) -> Configuration: """Save the given configuration. Args: The configuration to store. Returns: The configuration. """ load_configuration.cache_clear() configuration_file().write_text( dumps(asdict(configuration), indent=4), encoding="utf-8" ) return load_configuration() # Path: oshit/app/screens/main.py class Main(Screen[None]): """The main screen of the application.""" CONTEXT_HELP = """ ## Application keys | Key | Description | | - | - | | <kbd>F1</kbd> | This help screen. | | <kbd>F2</kbd> | Toggle compact/relaxed display. | | <kbd>F3</kbd> | Toggle dark/light mode. | | <kbd>F12</kbd> | Quit the application. | | <kbd>t</kbd> | View the top stories. | | <kbd>n</kbd> | View the new stories. | | <kbd>b</kbd> | View the best stories. | | <kbd>a</kbd> | View the AskHN stories. | | <kbd>s</kbd> | View the ShowHN stories. | | <kbd>j</kbd> | View the jobs. | """ CSS = """ TabbedContent, LoadingIndicator { background: $panel; } """ TITLE = f"Orange Site Hit v{__version__}" BINDINGS = [ Binding("f1", "help", "Help"), Binding("f2", "compact", "Compact/Relaxed"), Binding("f3", "toggle_dark"), Binding("f12", "quit", "Quit"), Binding("t", "go('top')"), Binding("n", "go('new')"), Binding("b", "go('best')"), Binding("a", "go('ask')"), Binding("s", "go('show')"), Binding("j", "go('jobs')"), Binding("down, enter", "pane"), ] def __init__(self) -> None: """Initialise the screen.""" super().__init__() config = load_configuration() self._hn = HN( max_concurrency=config.maximum_concurrency, timeout=config.connection_timeout, ) """The HackerNews client object.""" def compose(self) -> ComposeResult: """Compose the main screen's layout.""" yield Header() with HackerNews(): yield Items("top", "t", self._hn.top_stories) yield Items("new", "n", self._hn.new_stories) yield Items("best", "b", self._hn.best_stories) yield Items("ask", "a", self._hn.latest_ask_stories) yield Items("show", "s", self._hn.latest_show_stories) yield Items("jobs", "j", self._hn.latest_job_stories) yield Footer() def _refresh_subtitle(self) -> None: """Refresh the subtitle of the screen.""" self.sub_title = self.query_one(HackerNews).description def on_mount(self) -> None: """Configure things once the DOM is ready.""" self.set_interval(0.95, self._refresh_subtitle) def action_help(self) -> None: """Show the help screen.""" self.app.push_screen(Help(self)) def action_go(self, items: str) -> None: """Go to the given list of items. Args: items: The name of the list of items to go to. 
""" self.query_one(HackerNews).active = items self.query_one(HackerNews).focus_active_pane() def action_compact(self) -> None: """Toggle the compact display.""" news = self.query_one(HackerNews) news.compact = not news.compact @on(ShowUser) def show_user(self, event: ShowUser) -> None: """Handle a request to show the details of a user.""" self.app.push_screen(UserDetails(self._hn, event.user)) @on(ShowComments) def show_comments(self, event: ShowComments) -> None: """Handle a request to show the comments for an article.""" self.app.push_screen(Comments(self._hn, event.article)) # Path: oshit/app/oshit.py from textual.app import App from .data import load_configuration, save_configuration from .screens import Main """The main application class.""" ############################################################################## # Textual imports. ############################################################################## # Local imports. ############################################################################## class OSHit(App[None]): """The Orange Site Hit application.""" ENABLE_COMMAND_PALETTE = False def __init__(self) -> None: """Initialise the application.""" super().__init__() self.dark = load_configuration().dark_mode def on_mount(self) -> None: """Get things going once the app is up and running."""
self.retrieval_memory = RetrievalMemory(persistent_db_path, embedding_model_name, collection_name)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Maximilian-Winter/llama-cpp-agent # Path: src/llama_cpp_agent/function_calling.py class LlamaCppFunctionTool: def __init__(self, pydantic_model: Type[BaseModel], has_markdown_code_block=False, has_triple_quoted_string=False, **additional_parameters): self.model = pydantic_model self.look_for_field_string = has_markdown_code_block or has_triple_quoted_string self.has_markdown_code_block = has_markdown_code_block self.has_triple_quoted_string = has_triple_quoted_string self.additional_parameters = additional_parameters if additional_parameters else {} def __call__(self, *args, **kwargs): return self.model(**kwargs) # Path: src/llama_cpp_agent/agent_memory/core_memory_manager.py class CoreMemoryManager: def __init__(self, core_memory: dict): self.core_memory = core_memory def add_to_core_memory(self, key: str, child_key: str, value) -> str: """ Adds or updates an entry in the core memory. """ if key not in self.core_memory: self.core_memory[key] = {} self.core_memory[key][child_key] = value return f"Core memory updated. Key: {key}, Child Key: {child_key}" def replace_in_core_memory(self, key: str, child_key: str, new_value) -> str: """ Replaces an existing entry in the core memory. """ if key in self.core_memory and child_key in self.core_memory[key]: self.core_memory[key][child_key] = new_value return f"Core memory replaced. Key: {key}, Child Key: {child_key}" else: return "Key or child key not found in core memory." def remove_from_core_memory(self, key: str, child_key: str) -> str: """ Removes a specific field from a core memory entry. """ if key in self.core_memory and child_key in self.core_memory[key]: del self.core_memory[key][child_key] return f"Core memory entry removed. Key: {key}, Child Key: {child_key}" else: return "Key or child key not found in core memory." def build_core_memory_context(self): output = json.dumps(self.core_memory, indent=4) context = f"# Core-Memory:\n{output if output != '{}' else 'Empty'}" return context def load(self, file_path): with open(file_path, 'r', encoding='utf-8') as file: self.core_memory = json.load(file) def save(self, file_path): with open(file_path, 'w', encoding='utf-8') as file: json.dump(self.core_memory, file, indent=4) # Path: src/llama_cpp_agent/agent_memory/retrieval_memory_manager.py class RetrievalMemoryManager: def __init__(self, retrieval_memory: RetrievalMemory): def add_memory_to_retrieval(self, description: str, importance: float = 1.0) -> str: def retrieve_memories(self, query: str, max_results: int = 5) -> str: # Path: src/llama_cpp_agent/agent_memory/memory_tools.py from pydantic import BaseModel, Field from ..function_calling import LlamaCppFunctionTool from .core_memory_manager import CoreMemoryManager from .retrieval_memory_manager import RetrievalMemoryManager, RetrievalMemory class AddCoreMemory(BaseModel): """ Add a new entry to the core memory. 
""" key: str = Field(..., description="The key identifier for the core memory entry.") field: str = Field(..., description="A secondary key or field within the core memory entry.") value: str = Field(..., description="The value or data to be stored in the specified core memory entry.") def run(self, core_memory_manager: CoreMemoryManager): return core_memory_manager.add_to_core_memory(self.key, self.field, self.value) # Replace Core Memory Model class ReplaceCoreMemory(BaseModel): """ Replace an entry in the core memory. """ key: str = Field(..., description="The key identifier for the core memory entry.") field: str = Field(..., description="The specific field within the core memory entry to be replaced.") new_value: str = Field(..., description="The new value to replace the existing data in the specified core memory field.") def run(self, core_memory_manager: CoreMemoryManager): return core_memory_manager.replace_in_core_memory(self.key, self.field, self.value) class RemoveCoreMemory(BaseModel): """ Remove an entry in the core memory. """ key: str = Field(..., description="The key identifier for the core memory entry to be removed.") field: str = Field(..., description="The specific field within the core memory entry to be removed.") def run(self, core_memory_manager: CoreMemoryManager): return core_memory_manager.remove_from_core_memory(self.key, self.field) class RetrieveMemories(BaseModel): """ Retrieve memories from the retrieval memory based on a query. """ query: str = Field(..., description="The query to be used to retrieve memories from the retrieval memory.") def run(self, retrieval_memory_manager: RetrievalMemoryManager): return retrieval_memory_manager.retrieve_memories(self.query) class AddRetrievalMemory(BaseModel): """ Add memory to the retrieval memory. """ memory: str = Field(..., description="The memory to be added to the retrieval memory.") importance: float = Field(..., description="The importance of the memory to be added to the retrieval memory.") def run(self, retrieval_memory_manager: RetrievalMemoryManager): return retrieval_memory_manager.add_memory_to_retrieval(self.memory, self.importance) class AgentRetrievalMemory: def __init__(self, persistent_db_path="./retrieval_memory", embedding_model_name="all-MiniLM-L6-v2", collection_name="retrieval_memory_collection"):
"dot": Dot,
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: tedivm/paracelsus # Path: paracelsus/transformers/dot.py class Dot: comment_format: str = "dot" metadata: MetaData graph: pydot.Dot def __init__(self, metaclass: MetaData) -> None: self.metadata = metaclass self.graph = pydot.Dot("database", graph_type="graph") for table in self.metadata.tables.values(): node = pydot.Node(name=table.name) node.set_label(self._table_label(table)) node.set_shape("none") node.set_margin("0") self.graph.add_node(node) for column in table.columns: for foreign_key in column.foreign_keys: key_parts = foreign_key.target_fullname.split(".") left_table = key_parts[0] left_column = key_parts[1] edge = pydot.Edge(left_table, table.name) edge.set_label(column.name) edge.set_dir("both") edge.set_arrowhead("none") if not column.unique: edge.set_arrowhead("crow") l_column = self.metadata.tables[left_table].columns[left_column] edge.set_arrowtail("none") if not l_column.unique and not l_column.primary_key: edge.set_arrowtail("crow") self.graph.add_edge(edge) def _table_label(self, table: Table) -> str: column_output = "" columns = sorted(table.columns, key=utils.column_sort_key) for column in columns: attributes = set([]) if column.primary_key: attributes.add("Primary Key") if len(column.foreign_keys) > 0: attributes.add("Foreign Key") if column.unique: attributes.add("Unique") column_output += f' <tr><td align="left">{column.type}</td><td align="left">{column.name}</td><td>{", ".join(sorted(attributes))}</td></tr>\n' return f"""< <table border="0" cellborder="1" cellspacing="0" cellpadding="4"> <tr><td colspan="3" bgcolor="lightblue"><b>{table.name}</b></td></tr> {column_output.rstrip()} </table> >""" def __str__(self) -> str: return self.graph.to_string() # Path: paracelsus/transformers/mermaid.py class Mermaid: comment_format: str = "mermaid" metadata: MetaData def __init__(self, metaclass: MetaData) -> None: self.metadata = metaclass def _table(self, table: Table) -> str: output = f"\t{table.name}" output += " {\n" columns = sorted(table.columns, key=utils.column_sort_key) for column in columns: output += self._column(column) output += "\t}\n\n" return output def _column(self, column: Column) -> str: column_str = f"{column.type} {column.name}" if column.primary_key: if len(column.foreign_keys) > 0: column_str += " PK,FK" else: column_str += " PK" elif len(column.foreign_keys) > 0: column_str += " FK" options = [] if column.nullable: options.append("nullable") if column.unique: options.append("unique") if column.index: options.append("indexed") if len(options) > 0: column_str += f' "{",".join(options)}"' return f"\t\t{column_str}\n" def _relationships(self, column: Column) -> str: output = "" column_name = column.name right_table = column.table.name if column.unique: right_operand = "o|" else: right_operand = "o{" for foreign_key in column.foreign_keys: key_parts = foreign_key.target_fullname.split(".") left_table = key_parts[0] left_column = key_parts[1] left_operand = "" lcolumn = self.metadata.tables[left_table].columns[left_column] if lcolumn.unique or lcolumn.primary_key: left_operand = "||" else: left_operand = "}o" output += f"\t{left_table} {left_operand}--{right_operand} {right_table} : {column_name}\n" return output def __str__(self) -> 
str: output = "erDiagram\n" for table in self.metadata.tables.values(): output += self._table(table) for table in self.metadata.tables.values(): for column in table.columns.values(): if len(column.foreign_keys) > 0: output += self._relationships(column) return output # Path: paracelsus/cli.py import importlib import re import sys import typer from enum import Enum from pathlib import Path from typing import List from typing_extensions import Annotated from .transformers.dot import Dot from .transformers.mermaid import Mermaid from . import _version app = typer.Typer() transformers = { "mmd": Mermaid, "mermaid": Mermaid,
send_message(update.from_id, "😫 You are not allowed to use this bot.")
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: winniesi/tg-gemini-bot # Path: api/auth.py def is_authorized(from_id: int, user_name: str) -> bool: if str(user_name) in ALLOWED_USERS: return True return False # Path: api/context.py class ChatManager: """setting up a basic conversation storage manager""" def __init__(self): self.chats: Dict[str, ChatConversation] = {} def _new_chat(self, username: str) -> ChatConversation: chat = ChatConversation() self.chats[username] = chat return chat def get_chat(self, username: str) -> ChatConversation: if self.chats.get(username) is None: return self._new_chat(username) return self.chats[username] # Path: api/context.py class ImageChatManger: def __init__(self, prompt, file_id: str) -> None: self.prompt = prompt self.file_id = file_id def tel_photo_url(self) -> str: """process telegram photo url""" r_file_id = requests.get( f"https://api.telegram.org/bot{BOT_TOKEN}/getFile?file_id={self.file_id}" ) file_path = r_file_id.json().get("result").get("file_path") download_url = f"https://api.telegram.org/file/bot{BOT_TOKEN}/{file_path}" return download_url def photo_bytes(self) -> BytesIO: """get photo bytes""" photo_url = self.tel_photo_url() response = requests.get(photo_url) photo_bytes = BytesIO(response.content) return photo_bytes def send_image(self) -> str: response = generate_text_with_image(self.prompt, self.photo_bytes()) return response # Path: api/telegram.py class Update: def __init__(self, update: Dict) -> None: self.update = update self.from_id = update["message"]["from"]["id"] self.type = self._type() self.text = self._text() self.photo_caption = self._photo_caption() self.file_id = self._file_id() self.user_name = update["message"]["from"]["username"] def _type(self): if "text" in self.update["message"]: return "text" elif "photo" in self.update["message"]: return "photo" else: return "" def _photo_caption(self): if self.type == "photo": return self.update["message"].get("caption", "describe the photo") return "" def _text(self): if self.type == "text": return self.update["message"]["text"] return "" def _file_id(self): if self.type == "photo": return self.update["message"]["photo"][0]["file_id"] return "" # Path: api/telegram.py def send_message(chat_id, text): """send text message""" payload = { "chat_id": chat_id, "text": escape(text), "parse_mode": "MarkdownV2", } r = requests.post(f"{TELEGRAM_API}/sendMessage", data=payload) print(f"Sent message: {text} to {chat_id}") return r # Path: api/handle.py from .auth import is_authorized from .context import ChatManager, ImageChatManger from .telegram import Update, send_message """ All the chat that comes through the Telegram bot gets passed to the handle_message function. This function checks out if the user has the green light to chat with the bot. Once that's sorted, it figures out if the user sent words or an image and deals with it accordingly. For text messages, it fires up the ChatManager class that keeps track of the back-and-forth with that user. As for images, in Gemini pro, they're context-free, so you can handle them pretty straight-up without much fuss. 
""" chat_manager = ChatManager() def handle_message(update_data): update = Update(update_data) authorized = is_authorized(update.from_id, update.user_name) if not authorized:
raise error.flowFileException('Flow file does not exist.')
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: usail-hkust/LLMTSCS # Path: utils/utils.py def oneline_wrapper(dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow): results_table = [] all_rewards = [] all_queue_len = [] all_travel_time = [] for i in range(1): dic_path["PATH_TO_MODEL"] = (dic_path["PATH_TO_MODEL"].split(".")[0] + ".json" + time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time()))) dic_path["PATH_TO_WORK_DIRECTORY"] = (dic_path["PATH_TO_WORK_DIRECTORY"].split(".")[0] + ".json" + time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time()))) oneline = OneLine(dic_agent_conf=dic_agent_conf, dic_traffic_env_conf=merge(config.dic_traffic_env_conf, dic_traffic_env_conf), dic_path=merge(config.DIC_PATH, dic_path), roadnet=roadnet, trafficflow=trafficflow ) round_results = oneline.train(round=i) results_table.append([round_results['test_reward_over'], round_results['test_avg_queue_len_over'], round_results['test_avg_travel_time_over']]) all_rewards.append(round_results['test_reward_over']) all_queue_len.append(round_results['test_avg_queue_len_over']) all_travel_time.append(round_results['test_avg_travel_time_over']) # delete junk cmd_delete_model = 'rm -rf <dir>'.replace("<dir>", dic_path["PATH_TO_MODEL"]) cmd_delete_work = 'find <dir> -type f ! -name "state_action.json" -exec rm -rf {} \;'.replace("<dir>", dic_path["PATH_TO_WORK_DIRECTORY"]) os.system(cmd_delete_model) os.system(cmd_delete_work) results_table.append([np.average(all_rewards), np.average(all_queue_len), np.average(all_travel_time)]) results_table.append([np.std(all_rewards), np.std(all_queue_len), np.std(all_travel_time)]) table_logger = wandb.init( project=dic_traffic_env_conf['PROJECT_NAME'], group=f"{dic_traffic_env_conf['MODEL_NAME']}-{roadnet}-{trafficflow}-{len(dic_agent_conf['FIXED_TIME'])}_Phases", name="exp_results", config=merge(merge(dic_agent_conf, dic_path), dic_traffic_env_conf), ) columns = ["reward", "avg_queue_len", "avg_travel_time"] logger_table = wandb.Table(columns=columns, data=results_table) table_logger.log({"results": logger_table}) wandb.finish() return # Path: utils/error.py class flowFileException(Exception): def __init__(self, message): def __str__(self): # Path: run_advanced_maxpressure.py from utils.utils import oneline_wrapper from utils import error from multiprocessing import Process import os import time import argparse def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--memo", type=str, default='AdvancedMaxPressure') parser.add_argument("--model", type=str, default="AdvancedMaxPressure") parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS") parser.add_argument("--eightphase", action="store_true", default=False) parser.add_argument("--multi_process", action="store_true", default=True) parser.add_argument("--workers", type=int, default=1) parser.add_argument("--dataset", type=str, default="template") parser.add_argument("--traffic_file", type=str, default="flow_main_stream.json") return parser.parse_args() def main(in_args): traffic_file_list = [] if in_args.dataset == 'jinan': count = 3600 road_net = "3_4" traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"] template = "Jinan" elif 
in_args.dataset == 'hangzhou': count = 3600 road_net = "4_4" traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"] template = "Hangzhou" elif in_args.dataset == 'newyork_16x3': count = 3600 road_net = "16_3" traffic_file_list = ["anon_16_3_newyork_real.json"] template = "NewYork" elif in_args.dataset == 'newyork_28x7': count = 3600 road_net = "28_7" traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"] template = "NewYork" elif in_args.dataset == 'template': count = 3600 road_net = "1_1" traffic_file_list = ["flow_main_stream.json"] template = "template" # flow_file error try: if in_args.traffic_file not in traffic_file_list:
def validate(self, data: ndarray) -> Optional[InferredField]:
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ohadmata/shmessy # Path: src/shmessy/schema.py class InferredField(BaseModel): inferred_type: Optional[str] = None inferred_pattern: Optional[Any] = None # Path: src/shmessy/schema.py class ValidatorTypes(str, Enum): NUMERIC = "NUMERIC" STRING = "STRING" # Path: src/shmessy/types/base.py class BaseType(ABC): weight: int = 0 validator_types: Tuple[ValidatorTypes] @abstractmethod def validate(self, data: ndarray) -> Optional[InferredField]: pass @abstractmethod def fix(self, column: Series, inferred_field: InferredField) -> Series: pass def is_validator_type_valid(self, dtype: Type) -> bool: for possible_validator_type in self.validator_types: if self._check_single_validator_type(dtype, possible_validator_type): return True return False @staticmethod def _check_single_validator_type( dtype: Type, possible_validator_type: ValidatorTypes ) -> bool: if possible_validator_type == ValidatorTypes.NUMERIC and not issubdtype( dtype, number ): return False if possible_validator_type == ValidatorTypes.STRING and not ( issubdtype(dtype, object_) or issubdtype(dtype, str_) ): return False return True @property def name(self) -> str: return str(self.__class__.__name__.replace("Type", "")) # Path: src/shmessy/types/unix_timestamp.py import logging import math from datetime import datetime from enum import Enum from typing import Optional from numpy import ndarray from pandas import Series, to_datetime from ..schema import InferredField, ValidatorTypes from .base import BaseType logger = logging.getLogger(__name__) class TimestampResolution(str, Enum): SECONDS = "s" MILLISECONDS = "ms" NANOSECONDS = "ns" class UnixTimestampType(BaseType): weight = 4 validator_types = (ValidatorTypes.NUMERIC,) min_valid_year: int = 1980 max_valid_year: int = 2100 @staticmethod def _unix_timestamp_resolution(value: float) -> TimestampResolution: number_of_digits = len(str(int(value))) if number_of_digits == 10: return TimestampResolution.SECONDS if number_of_digits == 13: return TimestampResolution.MILLISECONDS if number_of_digits == 16: return TimestampResolution.NANOSECONDS @staticmethod def _fix_input_resolution( value: float, selected_resolution: TimestampResolution ) -> float: if selected_resolution == TimestampResolution.SECONDS: return value if selected_resolution == TimestampResolution.MILLISECONDS: return value / 1000 if selected_resolution == TimestampResolution.NANOSECONDS: return value / 1000 / 1000
bought_token_curr_price = get_price(desired_token_address)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: kokiez/solana-sniper # Path: birdeye.py def get_price(token_address): url = f"https://api.dexscreener.com/latest/dex/tokens/{token_address}" exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB'] response = requests.get(url).json() if token_address not in exclude: for pair in response['pairs']: if pair['quoteToken']['address'] == 'So11111111111111111111111111111111111111112': return float(pair['priceUsd']) else: return response['pairs'][0]['priceUsd'] return None # Path: birdeye.py def getSymbol(token): # usdc and usdt exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB'] if token not in exclude: url = f"https://api.dexscreener.com/latest/dex/tokens/{token}" Token_Symbol = "" Sol_symbol="" try: response = requests.get(url) # Check if the request was successful (status code 200) if response.status_code == 200: resp = response.json() print("Response:",resp['pairs'][0]['baseToken']['symbol']) for pair in resp['pairs']: quoteToken = pair['quoteToken']['symbol'] if quoteToken == 'SOL': Token_Symbol = pair['baseToken']['symbol'] Sol_symbol = quoteToken return Token_Symbol, Sol_symbol else: print(f"[getSymbol] Request failed with status code {response.status_code}") except requests.exceptions.RequestException as e: print(f"[getSymbol] error occurred: {e}") except: a = 1 return Token_Symbol, Sol_symbol else: if token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v': return "USDC", "SOL" elif token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v': return "USDT", "SOL" # Path: webhook.py def sendWebhook(title_type_info, description): global error_webhook global webhook_url title = "" title_type = title_type_info.split("|") if title_type[0] == "msg": title = title_type[1] color = colors["Green"] webhook(title, color, description, webhook_url) elif title_type[0] == "msg_b": title = title_type[1] color = colors["DarkAqua"] webhook(title, color, description, webhook_url) elif title_type[0] == "msg_s": title = title_type[1] color = colors["DarkAqua"] webhook(title, color, description, webhook_url) elif title_type[0] == "i_s": #invest or slippage was changed etc title = title_type[1] color = colors["DarkPurple"] webhook(title, color, description, webhook_url) elif title_type[0] == "e": #error title = title_type[1] color = colors["DarkRed"] webhook(title, color, description, error_webhook) elif title_type[0] == "a": #alert title = title_type[1] color = colors["LuminousVividPink"] webhook(title, color, description, webhook_url) elif title_type[0] == "w": #wallet info title = title_type[1] color = colors["Gold"] webhook(title, color, description, webhook_url) # Path: monitor_price_strategy.py import time from birdeye import get_price, getSymbol from webhook import sendWebhook """If you have ton of trades then best to use Simulate Transaction and modify this part of code to your needs""" """ Only Take Profit """ def limit_order(bought_token_price,desired_token_address, take_profit_ratio, execution_time, txB): token_symbol, SOl_Symbol = getSymbol(desired_token_address) # CALCULATE SELL LIMIT sell_limit_token_price = bought_token_price * take_profit_ratio print("-" * 79) print(f"| 
{'Bought Price':<12} | {'Sell Limit':<12} | {'Tx Buy':<50} |") print("-" * 79) print(f"|{bought_token_price:.12f} | {sell_limit_token_price:.12f} {txB:<50} |") print("-" * 79) sendWebhook(f"msg_b|BUY INFO {token_symbol}",f"Bought Price: {bought_token_price:.12f}\n**Sell Limit: {sell_limit_token_price:.15f}**\nTotal Buy Execution time: {execution_time} seconds\nBuy TXN: https://solscan.io/tx/{txB} |") # LOOP = CHECK IF PRICE >= SELL LIMIT | checks price every 5 seconds priceLow = True # while priceLow and isTimePassed(time_limit) == False: while priceLow: # Check if time limit has been passed for the token bought or not
self.LayerNorm = LayerNormTorchAlike(config.hidden_size, eps=config.layer_norm_eps, correction=True)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: enochyearn/MLX_RoBERTa # Path: custom/nn/layers/normalization.py class LayerNormBasselCorrected(Module): r"""Applies layer normalization [1] on the inputs with Bessel's Correction used by default like PyTorch. Computes .. math:: y = \frac{x - E[x]}{\sqrt{Var[x]} + \epsilon} \gamma + \beta, where :math:`\gamma` and :math:`\beta` are learned per feature dimension parameters initialized at 1 and 0 respectively. Var[x] would by default apply Bessel's Correction. [1]: https://arxiv.org/abs/1607.06450 Args: dims (int): The feature dimension of the input to normalize over eps (float): A small additive constant for numerical stability affine (bool): If True learn an affine transform to apply after the normalization correction (bool): """ def __init__(self, dims: int, eps: float = 1e-5, affine: bool = True, correction: bool = True): super().__init__() if affine: self.bias = mx.zeros((dims,)) self.weight = mx.ones((dims,)) self.eps = eps self.dims = dims self.correction = correction def _extra_repr(self): return f"{self.dims}, eps={self.eps}, affine={'weight' in self}" def __call__(self, x): means = mx.mean(x, axis=-1, keepdims=True) var = mx.var(x, axis=-1, keepdims=True, ddof=int(self.correction)) x = (x - means) * mx.rsqrt(var + self.eps) return (self.weight * x + self.bias) if "weight" in self else x # Path: custom/nn/layers/normalization.py class LayerNormTorchAlike(Module): r"""Applies layer normalization [1] on the inputs in PyTorch's style. MLX's official LayerNorm has a different behavior with PyTorch's. Computes .. math:: y = \frac{x - E[x]}{\sqrt{Var[x]} + \epsilon} \gamma + \beta, where :math:`\gamma` and :math:`\beta` are learned per feature dimension parameters initialized at 1 and 0 respectively. Var[x] would by default apply Bessel's Correction. [1]: https://arxiv.org/abs/1607.06450 Args: dims (int): The feature dimension of the input to normalize over eps (float): A small additive constant for numerical stability affine (bool): If True learn an affine transform to apply after the normalization correction (bool): """ def __init__(self, dims: int, eps: float = 1e-5, affine: bool = True, correction: bool = True): super().__init__() if affine: self.bias = mx.zeros((dims,)) self.weight = mx.ones((dims,)) self.eps = eps self.dims = dims self.correction = correction def _extra_repr(self): return f"{self.dims}, eps={self.eps}, affine={'weight' in self}" def __call__(self, x): # Calculate the mean of all elements; # i.e. the means for each element $\mathbb{E}[X]$ mean = x.mean(axis=-1, keepdims=True) # Calculate the squared mean of all elements; # i.e. 
the means for each element $\mathbb{E}[X^2]$ mean_x2 = (x ** 2).mean(axis=-1, keepdims=True) # Variance of all element $Var[X] = \mathbb{E}[X^2] - \mathbb{E}[X]^2$ var = mean_x2 - mean ** 2 # Normalize $$\hat{X} = \frac{X - \mathbb{E}[X]}{\sqrt{Var[X] + \epsilon}}$$ x_norm = (x - mean) / mx.sqrt(var + self.eps) # Scale and shift $$\text{LN}(x) = \gamma \hat{X} + \beta$$ x_norm = self.weight * x_norm + self.bias return x_norm # Path: mlx_roberta.py import argparse import time import mlx.core as mx import mlx.nn as nn import numpy as np import math from mlx.utils import tree_unflatten from collections import OrderedDict from custom.nn.layers.normalization import LayerNormBasselCorrected, LayerNormTorchAlike from transformers import RobertaTokenizer from dataclasses import dataclass # utils @dataclass class ModelConfig: intermediate_size: int = 3072 hidden_size: int = 768 no_heads: int = 12 hidden_layers: int = 12 vocab_size: int = 50265 attention_probs_dropout_prob: float = 0.1 hidden_dropout_prob: float = 0.1 layer_norm_eps: float = 1e-5 max_position_embeddings: int = 514 # QA model's parameters num_labels: int = 2 type_vocab_size: int = 2 pad_token_id: int = 1 chunk_size_feed_forward: int = 0 model_configs = { "deepset/roberta-base-squad2": ModelConfig(), "roberta-base": ModelConfig(), } model_types = { "deepset/roberta-base-squad2": "qa", "roberta-base": "base", } class RobertaEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
@app.get("/tables", response_model=RList[Table])
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: zy7y/dfs-generate # Path: entity.py class CodeGen(BaseVo): name: str code: str @field_serializer("code") def serialize_code(self, code: str, _info): _code = black.format_str(code, mode=black.FileMode()) return isort.code(_code) # Path: entity.py class Conf(SQLModel, table=True): __tablename__ = "dfs_conf" id: int = Field(None, primary_key=True) db_uri: str = Field(..., description="数据库连接") @classmethod def get_db_uri_last_new(cls): """获取最新的db_url""" with Session(engine) as session: query = select(cls).order_by(cls.id.desc()) latest_conf = session.exec(query).first() if latest_conf: return latest_conf.db_uri else: return None @classmethod def create(cls, uri) -> "Conf": with Session(engine) as session: obj = cls(db_uri=uri) session.add(obj) session.commit() session.refresh(obj) return obj @classmethod def get_last_uri_with_metadata(cls): uri = cls.get_db_uri_last_new() return uri, get_metadata_by_db_uri(uri) # Path: entity.py class DBConf(SQLModel): user: str password: str port: int host: str db: str def get_db_uri(self): return f"mysql+pymysql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}" def get_metadata(self): return get_metadata_by_db_uri(self.get_db_uri()) # Path: entity.py class R(BaseModel, Generic[T]): code: int = 20000 msg: str = "ok" data: Optional[T] = None @classmethod def success(cls, **kwargs): return cls(**kwargs) @classmethod def error(cls, msg): return cls(code=40000, msg=msg) # Path: entity.py class RList(R[T]): data: List[T] = Field(default_factory=list) # Path: entity.py class Table(BaseVo): table_name: str table_comment: Optional[str] = None # Path: generate/main.py def generate_code(table: Table, uri: str): return [ {"name": "model.py", "code": GenerateEntity(table).render()}, {"name": "router.py", "code": render_router(table.name)}, {"name": "main.py", "code": render_main(table.name)}, {"name": "db.py", "code": render_db(uri)}, ] # Path: main.py from fastapi import FastAPI, Query from fastapi.requests import Request from fastapi.responses import FileResponse from fastapi.staticfiles import StaticFiles from entity import CodeGen, Conf, DBConf, R, RList, Table from generate.main import generate_code import uvicorn app = FastAPI( title="dfs-generate", description="FastAPI SQLModel 逆向生成代码", docs_url=None ) app.mount("/static", StaticFiles(directory="static"), name="static") @app.get("/", include_in_schema=False) def index(): return FileResponse("static/index.html")
v_search = VectorSearchEngine(item_embedding)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: CrawlScript/Torch-MGDCF # Path: torch_mgdcf/metrics/ranking.py def ndcg_score(reference, hypothesis): """ Normalized Discounted Cumulative Gain (nDCG) Normalized version of DCG: nDCG = DCG(hypothesis)/DCG(reference) Parameters: reference - a gold standard (perfect) ordering Ex: [5,4,3,2,1] hypothesis - a proposed ordering Ex: [5,2,2,3,1] Returns: ndcg_score - normalized score """ return dcg_score(hypothesis)/dcg_score(reference) # Path: torch_mgdcf/metrics/ranking.py def precision_score(reference, hypothesis): result = np.sum(hypothesis, dtype=np.float32)/len(hypothesis) return result # Path: torch_mgdcf/metrics/ranking.py def recall_score(reference, hypothesis): result = np.sum(hypothesis, dtype=np.float32) / len(reference) return result # Path: torch_mgdcf/vector_search/vector_search.py class VectorSearchEngine(object): def __init__(self, vectors): super().__init__() if isinstance(vectors, torch.Tensor): self.vectors = vectors.detach().cpu().numpy() else: self.vectors = np.array(vectors) self.dim = self.vectors.shape[1] self.index = faiss.IndexFlatIP(self.dim) self.index.add(self.vectors) def search(self, query_vectors, k=10): query_vectors = np.asarray(query_vectors) topK_distances, topK_indices = self.index.search(query_vectors, k) return topK_distances, topK_indices # Path: torch_mgdcf/evaluation/ranking.py from tqdm import tqdm from torch_mgdcf.metrics.ranking import ndcg_score, precision_score, recall_score from torch_mgdcf.vector_search.vector_search import VectorSearchEngine import numpy as np import torch # coding=utf-8 # The code is from our another project GRecX: https://github.com/maenzhier/grecx_datasets def score(ground_truth, pred_items, k_list, metrics): pred_match = [1 if item in ground_truth else 0 for item in pred_items] max_k = k_list[-1] if len(ground_truth) > max_k: ndcg_gold = [1] * max_k else: ndcg_gold = [1] * len(ground_truth) + [0] * (max_k - len(ground_truth)) res_score = [] for metric in metrics: if metric == "ndcg": score_func = ndcg_score elif metric == "precision": score_func = precision_score elif metric == "recall": score_func = recall_score else: raise Exception("Not Found Metric : {}".format(metric)) for k in k_list: if metric == "ndcg": res_score.append(score_func(ndcg_gold[:k], pred_match[:k])) else: res_score.append(score_func(ground_truth, pred_match[:k])) return res_score def evaluate_mean_global_metrics(user_items_dict, user_mask_items_dict, user_embedding, item_embedding, k_list=[10, 20], metrics=["ndcg"]):
@MODELS.register_module()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: KyanChen/TTP # Path: mmseg/utils/typing_utils.py # Path: opencd/registry.py MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['opencd.models']) # Path: opencd/models/data_preprocessor.py from numbers import Number from typing import Any, Dict, List, Optional, Sequence, Union from mmengine.model import BaseDataPreprocessor from mmseg.utils import SampleList from opencd.registry import MODELS import numpy as np import torch import torch.nn.functional as F # Copyright (c) Open-CD. All rights reserved. def stack_batch(inputs: List[torch.Tensor], data_samples: Optional[SampleList] = None, size: Optional[tuple] = None, size_divisor: Optional[int] = None, pad_val: Union[int, float] = 0, seg_pad_val: Union[int, float] = 255) -> torch.Tensor: """Stack multiple inputs to form a batch and pad the images and gt_sem_segs to the max shape use the right bottom padding mode. Args: inputs (List[Tensor]): The input multiple tensors. each is a CHW 3D-tensor. data_samples (list[:obj:`SegDataSample`]): The list of data samples. It usually includes information such as `gt_sem_seg`. size (tuple, optional): Fixed padding size. size_divisor (int, optional): The divisor of padded size. pad_val (int, float): The padding value. Defaults to 0 seg_pad_val (int, float): The padding value. Defaults to 255 Returns: Tensor: The 4D-tensor. List[:obj:`SegDataSample`]: After the padding of the gt_seg_map. """ assert isinstance(inputs, list), \ f'Expected input type to be list, but got {type(inputs)}' assert len({tensor.ndim for tensor in inputs}) == 1, \ f'Expected the dimensions of all inputs must be the same, ' \ f'but got {[tensor.ndim for tensor in inputs]}' assert inputs[0].ndim == 3, f'Expected tensor dimension to be 3, ' \ f'but got {inputs[0].ndim}' assert len({tensor.shape[0] for tensor in inputs}) == 1, \ f'Expected the channels of all inputs must be the same, ' \ f'but got {[tensor.shape[0] for tensor in inputs]}' # only one of size and size_divisor should be valid assert (size is not None) ^ (size_divisor is not None), \ 'only one of size and size_divisor should be valid' padded_inputs = [] padded_samples = [] inputs_sizes = [(img.shape[-2], img.shape[-1]) for img in inputs] max_size = np.stack(inputs_sizes).max(0) if size_divisor is not None and size_divisor > 1: # the last two dims are H,W, both subject to divisibility requirement max_size = (max_size + (size_divisor - 1)) // size_divisor * size_divisor for i in range(len(inputs)): tensor = inputs[i] if size is not None: width = max(size[-1] - tensor.shape[-1], 0) height = max(size[-2] - tensor.shape[-2], 0) # (padding_left, padding_right, padding_top, padding_bottom) padding_size = (0, width, 0, height) elif size_divisor is not None: width = max(max_size[-1] - tensor.shape[-1], 0) height = max(max_size[-2] - tensor.shape[-2], 0) padding_size = (0, width, 0, height) else: padding_size = [0, 0, 0, 0] # pad img pad_img = F.pad(tensor, padding_size, value=pad_val) padded_inputs.append(pad_img) # pad gt_sem_seg if data_samples is not None: data_sample = data_samples[i] gt_sem_seg = data_sample.gt_sem_seg.data del data_sample.gt_sem_seg.data data_sample.gt_sem_seg.data = F.pad( gt_sem_seg, padding_size, value=seg_pad_val) if 
'gt_edge_map' in data_sample: gt_edge_map = data_sample.gt_edge_map.data del data_sample.gt_edge_map.data data_sample.gt_edge_map.data = F.pad( gt_edge_map, padding_size, value=seg_pad_val) if 'gt_seg_map_from' in data_sample: gt_seg_map_from = data_sample.gt_seg_map_from.data del data_sample.gt_seg_map_from.data data_sample.gt_seg_map_from.data = F.pad( gt_seg_map_from, padding_size, value=seg_pad_val) if 'gt_seg_map_to' in data_sample: gt_seg_map_to = data_sample.gt_seg_map_to.data del data_sample.gt_seg_map_to.data data_sample.gt_seg_map_to.data = F.pad( gt_seg_map_to, padding_size, value=seg_pad_val) data_sample.set_metainfo({ 'img_shape': tensor.shape[-2:], 'pad_shape': data_sample.gt_sem_seg.shape, 'padding_size': padding_size }) padded_samples.append(data_sample) else: padded_samples.append( dict( img_padding_size=padding_size, pad_shape=pad_img.shape[-2:])) return torch.stack(padded_inputs, dim=0), padded_samples
await free(str(phone_number).replace("+", ""))
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: N0rz3/Phunter # Path: lib/free_lookup.py async def free(phone_number): r = await Request("https://free-lookup.net/{}".format(phone_number), headers={'user-agent': random.choice(agent)}).get() html_body = BeautifulSoup(r.text, "html.parser") list_info = html_body.findChild("ul", class_="report-summary__list").findAll("div") info_dict = { k.text.strip(): info.text.strip() if info.text.strip() else "Not found" for _, (k, info) in enumerate(zip(list_info[::2], list_info[1::2])) } print(f"\n [{GREEN}>{WHITE}] Free-lookup") for key, value in info_dict.items(): if value != "Not found": print(f" ├── {key}: {value}") else: continue # Path: lib/spam.py async def spamcalls(p_n): print(f"\n [{GREEN}>{WHITE}] Spamcalls") url = f"https://spamcalls.net/en/number/{p_n}" r = await Request(url, headers={'user-agent': random.choice(user_agent)}).get() if r.status_code == 200: print(f" └── {RED}!{WHITE} Spammer") else: print(f" └── {GREEN}>{WHITE} Not spammer") # Path: lib/lookup.py import phonenumbers import json from phonenumbers import carrier from .reputation import * from .free_lookup import free from .spam import spamcalls from lib.text import * async def lookup(phone_number): print() parsed = phonenumbers.parse(phone_number) operator = carrier.name_for_number(parsed, "fr") line = phonenumbers.number_type(parsed) if line == phonenumbers.PhoneNumberType.FIXED_LINE: ligne = f" [{GREEN}>{WHITE}] Line type: Fixed" elif line == phonenumbers.PhoneNumberType.MOBILE: ligne = f" [{GREEN}>{WHITE}] Line type: Mobile" else: ligne = " [-] Line not found" possible = phonenumbers.is_possible_number(parsed) valid = phonenumbers.is_valid_number(parsed) with open("lib/country.json", "r") as file: read = json.load(file) d = 0 countrys = [] for country, code in read.items(): d += 1 if phone_number.startswith(code): countrys.append(country) if d == 153: break else: continue else: continue print(f"{WHITE}📞 Phone number: {BLUE}{phone_number}{WHITE}") if possible == True: pos = {"possible": "✔️"} else: pos = {"possible": "❌"} if valid == True: val = {"valid": "✔️"} else: val = {"valid": "❌"} print(f" [{GREEN}>{WHITE}] Possible: {pos['possible']}") print(f" [{GREEN}>{WHITE}] Valid: {val['valid']}") print() if operator != "": print(f" [{GREEN}>{WHITE}] Operator: {operator}") else: print(f" [-] Not Operator") try: print(f" [{GREEN}>{WHITE}] Possible location: " + str(countrys).replace("[", "").replace("]", "").replace("'", "")) except: print(f" [-] Not location") print(ligne) await reputation(phone_number)
coordinator = hass.data[DOMAIN][DATA_COORDINATORS][COORDINATOR_CHARGESESSIONS]
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: dan-r/HomeAssistant-Ohme # Path: custom_components/ohme/const.py DOMAIN = "ohme" # Path: custom_components/ohme/const.py DATA_COORDINATORS = "coordinators" # Path: custom_components/ohme/const.py COORDINATOR_CHARGESESSIONS = 0 # Path: custom_components/ohme/const.py COORDINATOR_ADVANCED = 3 # Path: custom_components/ohme/const.py DATA_CLIENT = "client" # Path: custom_components/ohme/coordinator.py class OhmeChargeSessionsCoordinator(DataUpdateCoordinator): """Coordinator to pull main charge state and power/current draw.""" def __init__(self, hass): """Initialise coordinator.""" super().__init__( hass, _LOGGER, name="Ohme Charge Sessions", update_interval=timedelta(seconds=30), ) self._client = hass.data[DOMAIN][DATA_CLIENT] async def _async_update_data(self): """Fetch data from API endpoint.""" try: return await self._client.async_get_charge_sessions() except BaseException: raise UpdateFailed("Error communicating with API") # Path: custom_components/ohme/coordinator.py class OhmeAdvancedSettingsCoordinator(DataUpdateCoordinator): """Coordinator to pull CT clamp reading.""" def __init__(self, hass): """Initialise coordinator.""" super().__init__( hass, _LOGGER, name="Ohme Advanced Settings", update_interval=timedelta(minutes=1), ) self._client = hass.data[DOMAIN][DATA_CLIENT] async def _async_update_data(self): """Fetch data from API endpoint.""" try: return await self._client.async_get_advanced_settings() except BaseException: raise UpdateFailed("Error communicating with API") # Path: custom_components/ohme/utils.py def charge_graph_in_slot(charge_start, points, skip_format=False): """Are we currently in a charge slot?""" now = int(time()) data = points if skip_format else _format_charge_graph(charge_start, points) # Loop through every value, skipping the last for idx in range(0, len(data) - 1): # This is our current point if data[idx]["t"] < now and data[idx + 1]["t"] > now: # If the delta line we are on is steeper than 10, # we are in a charge slot. if data[idx + 1]["y"] - data[idx]["y"] > 10: return True break return False # Path: custom_components/ohme/binary_sensor.py import logging from homeassistant.components.binary_sensor import ( BinarySensorDeviceClass, BinarySensorEntity ) from homeassistant.helpers.update_coordinator import CoordinatorEntity from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity import generate_entity_id from homeassistant.util.dt import (utcnow) from .const import DOMAIN, DATA_COORDINATORS, COORDINATOR_CHARGESESSIONS, COORDINATOR_ADVANCED, DATA_CLIENT from .coordinator import OhmeChargeSessionsCoordinator, OhmeAdvancedSettingsCoordinator from .utils import charge_graph_in_slot """Platform for sensor integration.""" from __future__ import annotations _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry, async_add_entities, ): """Setup sensors and configure coordinator.""" client = hass.data[DOMAIN][DATA_CLIENT]
) -> Union[IPResponse, None]:
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Almas-Ali/SpyIP # Path: spyip/exceptions.py class TooManyRequests(Exception): pass # Path: spyip/exceptions.py class ConnectionTimeout(Exception): pass # Path: spyip/exceptions.py class StatusError(Exception): pass # Path: spyip/models.py class IPResponse(BaseModel): """ Example response from API: { "status": "success", "continent": "Asia", "continentCode": "AS", "country": "India", "countryCode": "IN", "region": "DL", "regionName": "National Capital Territory of Delhi", "city": "New Delhi", "district": "", "zip": "110001", "lat": 28.6139, "lon": 77.209, "timezone": "Asia/Kolkata", "offset": 19800, "currency": "INR", "isp": "Google LLC", "org": "Google LLC", "as": "AS15169 Google LLC", "asname": "GOOGLE", "mobile": false, "proxy": false, "hosting": true, "query": "142.250.193.206", } """ status: str = Field(..., description='Status of the request.') continent: str = Field(..., description='Continent name.') continentCode: str = Field(..., description='Continent code.') country: str = Field(..., description='Country name.') countryCode: str = Field(..., description='Country code.') region: str = Field(..., description='Region code.') regionName: str = Field(..., description='Region name.') city: str = Field(..., description='City name.') district: str = Field(..., description='District name.') zip_: str = Field(..., description='Zip code.') lat: float = Field(..., description='Latitude.') lon: float = Field(..., description='Longitude.') timezone: str = Field(..., description='Timezone.') offset: int = Field(..., description='Offset.') currency: str = Field(..., description='Currency.') isp: str = Field(..., description='ISP name.') org: str = Field(..., description='Organization name.') as_: str = Field(..., description='AS number and name.') asname: str = Field(..., description='AS name.') mobile: bool = Field(..., description='Mobile status.') proxy: bool = Field(..., description='Proxy status.') hosting: bool = Field(..., description='Hosting status.') query: str = Field(..., description='IP address.') class Config: def alias_generator(x): return x.replace('_', '') populate_by_name = True # fields = { # Alias for reserved keywords # "as_": "as", # "zip_": "zip", # } @field_validator('status') def check_status(cls, v): if v != 'success': raise ValueError('Invalid IP address.') return v def json(self, **kwargs) -> str: return self.model_dump_json(**kwargs) # Path: spyip/models.py class DNSResponse(BaseModel): """ Example response from API: "dns": { "ip": "74.125.73.83", "geo": "United States - Google" } """ ip: str = Field(..., description='IP address.') geo: str = Field(..., description='Geo location.') def json(self, **kwargs) -> str: return self.model_dump_json(**kwargs) # Path: spyip/backend.py from typing import List, Union from .exceptions import ( TooManyRequests, ConnectionTimeout, StatusError, ) from .models import ( IPResponse, DNSResponse, ) import asyncio import random import string import httpx def get_random_string(length: int = 32) -> str: """Generate a random string of fixed length.""" letters = string.ascii_lowercase + string.digits return ''.join(random.sample(letters, length)) # API endpoints for IP address lookup trace_me_url = 
'http://ip-api.com/json/' trace_ip_url = 'http://ip-api.com/json/%(query)s' trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/' trace_ip_batch_url = 'http://ip-api.com/batch' headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.5', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0', } def trace_me( timeout: int = 5, lang: str = 'en',
process_task(fake_job, fake_pipeline, fake_executor, fake_path, parallel_exec=True)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: leopedroso45/Stable-Diffusion-ImageGen # Path: sevsd/process_task.py def check_cuda_and_clear_cache(): r""" Clears the CUDA cache if available, otherwise performs garbage collection. This function is called to manage memory usage, particularly when working with large models or multiple image generations. """ if torch.cuda.is_available(): torch.cuda.empty_cache() else: gc.collect() # Path: sevsd/process_task.py def process_task(job, pipeline, executor, path, parallel_exec=True): r""" Processes a single image generation job using the specified pipeline and execution parameters. This function handles the generation of one or more images based on a given job description. It supports both parallel and sequential execution modes. Generated images are saved to the specified path. Parameters: job (dict): A dictionary containing details for the image generation task. It includes 'prompt' and optionally 'negative_prompt'. pipeline (callable): The Stable Diffusion pipeline callable used for generating images. executor (dict): A dictionary containing execution parameters such as 'num_of_exec', 'cfg_scale', and 'inference_steps'. path (str): The directory path where generated images will be saved. parallel_exec (bool, optional): If True, generates all specified images in parallel. Defaults to True. The function saves each generated image with a unique timestamp in the specified path and prints the save location. In case of any exceptions, they are caught and printed. Example: job = { "prompt": "A scenic landscape", "negative_prompt": "blurred image, black and white, watermarked image" } executor = { "num_of_exec": 2, "cfg_scale": 7, "inference_steps": 50 } pipeline = setup_pipeline("CompVis/stable-diffusion-v1-4") process_task(job, pipeline, executor, "./generated-images", parallel_exec=False) Note: This function also handles CUDA cache clearing and garbage collection for memory management. """ def call_generate_image(): images = generate_image(job, pipeline, executor, parallel_exec) if images is not None: for image in images: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S%f") image_path = f"{path}/generated_image_{timestamp}.png" image.save(image_path) print(f"[sevsd] - image saved at {image_path}") else: print("[sevsd] - image generation failed due to memory constraints.") check_cuda_and_clear_cache() try: path = check_os_path(path) if job is not None: if parallel_exec is not True: num_images = executor.get("num_of_exec", 1) for _ in range(num_images): call_generate_image() else: call_generate_image() except Exception as e: print(f"[sevsd] - exception: {e}") finally: check_cuda_and_clear_cache() # Path: sevsd/process_task.py def check_os_path(path): r""" Checks if the given path exists, and if not, creates the necessary directories. This function ensures that the output path for saving images is available. Parameters: path (str): The directory path to check and create if necessary. Returns: str: The verified or created directory path. 
""" if not os.path.exists(path): os.makedirs(path) print(f"[sevsd] - created path: {path}") return path # Path: tests/test_process_task.py import unittest import sys from unittest.mock import patch, MagicMock from sevsd.process_task import check_cuda_and_clear_cache, process_task, check_os_path sys.path.append('../') class TestProcessTask(unittest.TestCase): @patch('sevsd.process_task.generate_image') def test_process_task(self, mock_generate_image): mock_image = MagicMock() mock_image.save = MagicMock() mock_generate_image.return_value = [mock_image] fake_job = {"prompt": "prompt", "details": (None, 50, 1, 7.5)} fake_pipeline = MagicMock() fake_executor = {"num_of_exec": 1, "cfg_scale": 7} fake_path = "test_path"
hooks.append(Hook(t, lambda grad: grad.T))
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Emperor-WS/PyEmber # Path: ember/autograd/hook.py class Hook: """ Hook class for attaching gradient functions to tensors. Hooks allow users to attach custom gradient functions to tensors for monitoring or modifying gradients during backpropagation. Attributes: - tensor (Tensor): The target tensor. - grad_fn (callable): The gradient function to be applied to the tensor. Methods: - __init__(self, tensor, grad_fn): Constructor for Hook class. - __repr__(self): String representation of the Hook instance. """ __slots__ = 'tensor', 'grad_fn' def __init__(self, tensor, grad_fn): """ Constructor for the Hook class. Args: - tensor (Tensor): The target tensor. - grad_fn (callable): The gradient function to be applied to the tensor. """ self.tensor = tensor self.grad_fn = grad_fn def __repr__(self): """ String representation of the Hook instance. Returns: - str: A string containing information about the tensor and its associated gradient function. """ # Extract the class name from the qualified name of the gradient function grad_name = self.grad_fn.__qualname__.split('.')[0] return f"Hook(tensor_id={self.tensor.id}, grad_fn={grad_name.upper()})" # Path: ember/autograd/_utils.py def numpy_unpad(x, pad_width): """ Remove padding from an array. Args: - x (numpy.ndarray): Input array. - pad_width (tuple of ints): Amount of padding on each dimension. Returns: - numpy.ndarray: Unpadded array. """ slices = [] for pad in pad_width: end = None if pad[1] == 0 else -pad[1] slices.append(slice(pad[0], end )) return x[tuple(slices)] # Path: ember/autograd/_utils.py def inv_permutation(permutation): """ Compute the inverse of a permutation. Args: - permutation (list): List representing a permutation. Returns: - list: Inverse permutation. """ inverse = [0] * len(permutation) for original_idx, permuted_idx in enumerate(permutation): inverse[permuted_idx] = original_idx return inverse # Path: ember/autograd/numeric.py import numpy as np import ember from .hook import Hook from ._utils import numpy_unpad, inv_permutation def _T(t): """ Transpose operation on the input tensor. Args: - t: Input tensor. Returns: - Tensor: Resultant tensor with the transpose operation applied. """ t = ember.to_tensor(t) # Convert the input tensor to a Tensor data = t.data.T # Transpose operation requires_grad = t.requires_grad # Set requires_grad based on input tensor hooks = [] # Register a hook for gradient computation if the input tensor requires it if requires_grad:
labels_dict = read_yaml(parsed_args.params)["labels_mapping"]
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Hassi34/iot-device-identification # Path: src/utils/common.py def read_yaml(path_to_yaml: str) -> dict: with open(path_to_yaml) as yaml_file: content = yaml.safe_load(yaml_file) return content # Path: src/utils/sys_logging.py def get_logger(logs_filepath: str): logger.add( logs_filepath, format="{time} | {level} | {name}.{module}:{line} | {message}", level="DEBUG", rotation="10 KB", retention="10 days", compression="zip", colorize=True, enqueue=True, catch=True, encoding="utf-8", ) return logger # Path: src/utils/common.py def write_dict_to_yaml(dict_input: dict, yaml_file_path: str): try: current_file_data = read_yaml(yaml_file_path) current_file_data.update(dict_input) with open(yaml_file_path, "w") as f: yaml.dump(current_file_data, f) except (FileNotFoundError , AttributeError): with open(yaml_file_path, "w") as f: yaml.dump(dict_input, f) # Path: src/utils/data_ops.py def gzip_np_arr(np_array: np.ndarray, filepath: str): with gzip.GzipFile(filepath, "w") as f: np.save(file=f, arr=np_array) # Path: src/utils/data_ops.py def get_fitted_pipeline(df, columns, KNN_IMPUTER_NEIGHBORS: int = 3): ct = ColumnTransformer( transformers=[("input_features", "passthrough", columns)], remainder="drop" ) imputer = KNNImputer(n_neighbors=KNN_IMPUTER_NEIGHBORS) scaler = StandardScaler() pipeline = Pipeline( steps=[("select_columns", ct), ("imputer", imputer), ("scaler", scaler)] ) return pipeline.fit(df) # Path: src/stage_03_preprocess_data.py import argparse import joblib import pandas as pd from src.utils.common import read_yaml from src.utils.sys_logging import get_logger from sklearn.preprocessing import LabelEncoder from src.utils.common import write_dict_to_yaml from src.utils.data_ops import gzip_np_arr from sklearn.model_selection import train_test_split from src.utils.data_ops import get_fitted_pipeline from pathlib import Path STAGE = "Preprocess Data" def preprocess_data(): complete_df = pd.read_parquet(RAW_DATA_FILE_PATH) logger.info( f'The raw data file has been loaded from "{RAW_DATA_FILE_PATH}" with the shape "{complete_df.shape}"' ) duplicate_rows = complete_df.duplicated().sum() if duplicate_rows > 0: logger.warning( f"Found {duplicate_rows} duplicate rows, removing duplicate rows..." 
) complete_df = complete_df.drop_duplicates(keep="first") X = complete_df.drop([TARGET_COLUMN_NAME], axis=1) y = complete_df[TARGET_COLUMN_NAME] feature_cols = params["input_features_schema"] feature_cols = list(feature_cols.keys()) logger.info(f"Read {len(feature_cols)} feature columns from params") data_processing_pipeline = get_fitted_pipeline( X, feature_cols, KNN_IMPUTER_NEIGHBORS=KNN_IMPUTER_NEIGHBORS ) Path(DATA_PREPROCESSING_PIPELINE_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True) joblib.dump(data_processing_pipeline, DATA_PREPROCESSING_PIPELINE_FILE_PATH, compress=1) logger.info(f"Saved the preprocessing pipeline to {DATA_PREPROCESSING_PIPELINE_FILE_PATH}") data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH) data_processing_pipeline data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH) logger.info( f'Loaded sklearn data preprocessing pipeline from "{DATA_PREPROCESSING_PIPELINE_FILE_PATH}"' ) X_transformed = data_processing_pipeline.transform(X) logger.info(f'Dataframe shape after transformation is "{X_transformed.shape}"') le = LabelEncoder() le.fit(y) labels_mapping_dict = {"labels_mapping": ""} le_dict = dict(zip(le.transform(le.classes_), le.classes_)) le_dict = {int(k): v for k, v in le_dict.items()} labels_mapping_dict["labels_mapping"] = le_dict logger.info(f"Label encoding map has the dictionary: {le_dict}") write_dict_to_yaml(labels_mapping_dict, parsed_args.params) logger.info(f'Updated the label encoding map in the file at "{parsed_args.params}"')
text = "".join(text2sep_kata(text)[0])
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: see2023/Bert-VITS2-ext # Path: config.py class Resample_config: class Preprocess_text_config: class Bert_gen_config: class Emo_gen_config: class Train_ms_config: class Webui_config: class Server_config: class Translate_config: class Config: def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, transcription_path: str, cleaned_path: str, train_path: str, val_path: str, config_path: str, val_per_lang: int = 5, max_val_total: int = 10000, clean: bool = True, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, num_processes: int = 2, device: str = "cuda", use_multi_device: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, num_processes: int = 2, device: str = "cuda", use_multi_device: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, env: Dict[str, any], base: Dict[str, any], model: str, num_workers: int, spec_cache: bool, keep_ckpts: int, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, device: str, model: str, v_model: str, config_path: str, language_identification_library: str, port: int = 7860, share: bool = False, debug: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, models: List[Dict[str, any]], port: int = 5000, device: str = "cuda" ): def from_dict(cls, data: Dict[str, any]): def __init__(self, app_key: str, secret_key: str): def from_dict(cls, data: Dict[str, any]): def __init__(self, config_path: str): # Path: text/japanese.py def text2sep_kata(text: str) -> (list, list): parsed = pyopenjtalk.run_frontend(text) res = [] sep = [] for parts in parsed: word, yomi = replace_punctuation(parts["string"]), parts["pron"].replace( "’", "" ) if yomi: if re.match(_MARKS, yomi): if len(word) > 1: word = [replace_punctuation(i) for i in list(word)] yomi = word res += yomi sep += word continue elif word not in rep_map.keys() and word not in rep_map.values(): word = "," yomi = word res.append(yomi) else: if word in _SYMBOL_TOKENS: res.append(word) elif word in ("っ", "ッ"): res.append("ッ") elif word in _NO_YOMI_TOKENS: pass else: res.append(word) sep.append(word) return sep, [hira2kata(i) for i in res], get_accent(parsed) # Path: for_deploy/infer_utils.py import sys import torch from transformers import ( AutoModelForMaskedLM, AutoTokenizer, DebertaV2Model, DebertaV2Tokenizer, ClapModel, ClapProcessor, ) from config import config from text.japanese import text2sep_kata class BertFeature: def __init__(self, model_path, language="ZH"): self.model_path = model_path self.language = language self.tokenizer = None self.model = None self.device = None self._prepare() def _get_device(self, device=config.bert_gen_config.device): if ( sys.platform == "darwin" and torch.backends.mps.is_available() and device == "cpu" ): device = "mps" if not device: device = "cuda" return device def _prepare(self): self.device = self._get_device() if self.language == "EN": self.tokenizer = DebertaV2Tokenizer.from_pretrained(self.model_path) 
self.model = DebertaV2Model.from_pretrained(self.model_path).to(self.device) else: self.tokenizer = AutoTokenizer.from_pretrained(self.model_path) self.model = AutoModelForMaskedLM.from_pretrained(self.model_path).to( self.device ) self.model.eval() def get_bert_feature(self, text, word2ph): if self.language == "JP":
color = get_activation(self.cfg.color_activation)(features)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: chinhsuanwu/ifusion-threestudio # Path: threestudio/models/materials/base.py class BaseMaterial(BaseModule): @dataclass class Config(BaseModule.Config): pass cfg: Config requires_normal: bool = False requires_tangent: bool = False def configure(self): pass def forward(self, *args, **kwargs) -> Float[Tensor, "*B 3"]: raise NotImplementedError def export(self, *args, **kwargs) -> Dict[str, Any]: return {} # Path: threestudio/models/networks.py def get_encoding(n_input_dims: int, config) -> nn.Module: # input suppose to be range [0, 1] encoding: nn.Module if config.otype == "ProgressiveBandFrequency": encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config)) elif config.otype == "ProgressiveBandHashGrid": encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config)) elif config.otype == "HashGridSpatialTime": encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding else: encoding = TCNNEncoding(n_input_dims, config_to_primitive(config)) encoding = CompositeEncoding( encoding, include_xyz=config.get("include_xyz", False), xyz_scale=2.0, xyz_offset=-1.0, ) # FIXME: hard coded return encoding # Path: threestudio/models/networks.py def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module: network: nn.Module if config.otype == "VanillaMLP": network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config)) elif config.otype == "SphereInitVanillaMLP": network = SphereInitVanillaMLP( n_input_dims, n_output_dims, config_to_primitive(config) ) else: assert ( config.get("sphere_init", False) is False ), "sphere_init=True only supported by VanillaMLP" network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config)) return network # Path: threestudio/utils/ops.py def dot(x, y): return torch.sum(x * y, -1, keepdim=True) # Path: threestudio/utils/ops.py def get_activation(name) -> Callable: if name is None: return lambda x: x name = name.lower() if name == "none": return lambda x: x elif name == "lin2srgb": return lambda x: torch.where( x > 0.0031308, torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055, 12.92 * x, ).clamp(0.0, 1.0) elif name == "exp": return lambda x: torch.exp(x) elif name == "shifted_exp": return lambda x: torch.exp(x - 1.0) elif name == "trunc_exp": return trunc_exp elif name == "shifted_trunc_exp": return lambda x: trunc_exp(x - 1.0) elif name == "sigmoid": return lambda x: torch.sigmoid(x) elif name == "tanh": return lambda x: torch.tanh(x) elif name == "shifted_softplus": return lambda x: F.softplus(x - 1.0) elif name == "scale_-11_01": return lambda x: x * 0.5 + 0.5 else: try: return getattr(F, name) except AttributeError: raise ValueError(f"Unknown activation function: {name}") # Path: threestudio/models/materials/no_material.py import random import torch import torch.nn as nn import torch.nn.functional as F import threestudio from dataclasses import dataclass, field from threestudio.models.materials.base import BaseMaterial from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import dot, get_activation from threestudio.utils.typing import * @threestudio.register("no-material") class NoMaterial(BaseMaterial): @dataclass 
class Config(BaseMaterial.Config): n_output_dims: int = 3 color_activation: str = "sigmoid" input_feature_dims: Optional[int] = None mlp_network_config: Optional[dict] = None requires_normal: bool = False cfg: Config def configure(self) -> None: self.use_network = False if ( self.cfg.input_feature_dims is not None and self.cfg.mlp_network_config is not None ): self.network = get_mlp( self.cfg.input_feature_dims, self.cfg.n_output_dims, self.cfg.mlp_network_config, ) self.use_network = True self.requires_normal = self.cfg.requires_normal def forward( self, features: Float[Tensor, "B ... Nf"], **kwargs ) -> Float[Tensor, "B ... Nc"]: if not self.use_network: assert ( features.shape[-1] == self.cfg.n_output_dims ), f"Expected {self.cfg.n_output_dims} output dims, only got {features.shape[-1]} dims input."
origin_sync += f'{TEXT["bright_green"]}{glyph("ahead")} {ahead}{RESET}'
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: jasursadikov/mud # Path: utils.py TEXT = { 'white': '\033[37m', 'gray': '\033[90m', 'black': '\033[30m', 'red': '\033[31m', 'green': '\033[32m', 'yellow': '\033[33m', 'blue': '\033[34m', 'magenta': '\033[35m', 'cyan': '\033[36m', 'bright_white': '\033[97m', 'bright_red': '\033[91m', 'bright_green': '\033[92m', 'bright_yellow': '\033[93m', 'bright_blue': '\033[94m', 'bright_magenta': '\033[95m', 'bright_cyan': '\033[96m', } # Path: utils.py BACK = { 'white': '\033[47m', 'medium_gray': '\033[100m', 'black': '\033[40m', 'red': '\033[41m', 'green': '\033[42m', 'yellow': '\033[43m', 'blue': '\033[44m', 'magenta': '\033[45m', 'cyan': '\033[46m', 'bright_white': '\033[107m', 'bright_red': '\033[101m', 'bright_green': '\033[102m', 'bright_yellow': '\033[103m', 'bright_blue': '\033[104m', 'bright_magenta': '\033[105m', 'bright_cyan': '\033[106m', } # Path: utils.py RESET = '\033[0m' # Path: utils.py STYLES = { 'bold': '\033[1m', 'dim': '\033[2m', 'italic': '\033[3m', 'underline': '\033[4m', 'blink': '\033[5m', } # Path: utils.py END_STYLES = { 'bold': '\033[22m', 'dim': '\033[22m', 'italic': '\033[23m', 'underline': '\033[24m', 'blink': '\033[25m', } # Path: utils.py def glyph(key: str) -> str: return GLYPHS[key][0] if settings.mud_settings['nerd_fonts'] else GLYPHS[key][1] # Path: commands.py import utils import asyncio import subprocess from utils import TEXT, BACK, RESET, STYLES, END_STYLES, glyph from typing import List, Dict from collections import Counter from prettytable import PrettyTable, PLAIN_COLUMNS class Commands: def __init__(self, repos): self.repos = repos self.label_color_cache = {} self.current_color_index = 0 # `mud status` command implementation def status(self, repos: Dict[str, List[str]]) -> None: table = self._get_table() for path, tags in repos.items(): formatted_path = self._get_formatted_path(path) branch = self._get_branch_status(path) author = self._get_authors_name(path) commit = self._get_commit_message(path, 30) colored_labels = self._get_formatted_labels(tags) # Sync with origin status ahead_behind_cmd = subprocess.run(['git', 'rev-list', '--left-right', '--count', 'HEAD...@{upstream}'], text=True, cwd=path, capture_output=True) stdout = ahead_behind_cmd.stdout.strip().split() if len(stdout) >= 2: ahead, behind = stdout[0], stdout[1] origin_sync = '' if ahead and ahead != '0':
self.mm_projector = build_vision_projector(config)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Q-MM/PureMM # Path: model/multimodal_encoder/builder.py def build_vision_tower(vision_tower_cfg, **kwargs): vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None)) is_absolute_path_exists = os.path.exists(vision_tower) if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"): return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs) raise ValueError(f'Unknown vision tower: {vision_tower}') # Path: model/multimodal_projector/builder.py def build_vision_projector(config, delay_load=False, **kwargs): projector_type = getattr(config, 'mm_projector_type', 'linear') if projector_type == 'linear': return nn.Linear(config.mm_hidden_size, config.hidden_size) mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) if mlp_gelu_match: mlp_depth = int(mlp_gelu_match.group(1)) modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] for _ in range(1, mlp_depth): modules.append(nn.GELU()) modules.append(nn.Linear(config.hidden_size, config.hidden_size)) return nn.Sequential(*modules) larger_mlp_gelu_match = re.match(r'^larger_mlp(\d+)x_gelu$', projector_type) if larger_mlp_gelu_match: mlp_depth = int(mlp_gelu_match.group(1)) modules = [nn.Linear(config.mm_hidden_size, config.mm_hidden_size)] for _ in range(1, mlp_depth-1): modules.append(nn.GELU()) modules.append(nn.Linear(config.mm_hidden_size, config.mm_hidden_size)) modules.append(nn.Linear(config.mm_hidden_size, config.hidden_size)) return nn.Sequential(*modules) if projector_type == 'identity': return IdentityMap() raise ValueError(f'Unknown projector type: {projector_type}') # Path: model/PureMM_arch.py from abc import ABC, abstractmethod from .multimodal_encoder.builder import build_vision_tower from .multimodal_projector.builder import build_vision_projector import torch import torch.nn as nn # Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX = -100 IMAGE_TOKEN_INDEX = -200 DEFAULT_IMAGE_TOKEN = "<image>" DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>" DEFAULT_IM_START_TOKEN = "<im_start>" DEFAULT_IM_END_TOKEN = "<im_end>" def rank0_print(rank, *args): if rank == 0: print(*args) class PureMMMetaModel: def __init__(self, config): super(PureMMMetaModel, self).__init__(config) if hasattr(config, "mm_vision_tower"): self.vision_tower = build_vision_tower(config, delay_load=True) # self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)
return SpotifyApi(os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"))
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Ananya2001-an/spotify-py-sdk # Path: spotify_py_sdk/spotify_api.py class SpotifyApi: """Create an api instance and call the various endpoint methods. :param client_id: Client_ID for your app :type client_id: str :param client_secret: Client_Secret for your app :type client_secret: str :param config: pass :class:`SdkConfig` instance, defaults to None :type config: :class:`SdkConfig`, optional """ _root_url: str = "https://api.spotify.com/v1/" def __init__(self, client_id: str, client_secret: str, config: Optional[SdkConfig] = None): """Constructor method """ self.access_token_manager: AccessTokenManager = AccessTokenManager(client_id, client_secret) self.sdk_config: Optional[SdkConfig] = config self.albums: Albums = Albums(self) self.artists: Artists = Artists(self) self.audiobooks: Audiobooks = Audiobooks(self) self.browse: Browse = Browse(self) self.chapters: Chapters = Chapters(self) self.episodes: Episodes = Episodes(self) self.recommendations: Recommendations = Recommendations(self) self.markets: Markets = Markets(self) # self.player: Player = Player(self) # need different auth strategy; yet to be implemented self.playlists: Playlists = Playlists(self) self.shows: Shows = Shows(self) self.tracks: Tracks = Tracks(self) self.users: Users = Users(self) self.search: Search = Search(self) # self.current_user: CurrentUser = CurrentUser(self) # need different auth strategy; yet to be implemented @classmethod def fetch_results(cls, url: str, opts: dict): """Fetch results by making a request to the given URL """ try: result = requests.request(method=opts["method"], url=url, headers=opts["headers"], data=opts["body"]) return result.json() except HTTPError as e: raise f"Failed to fetch result! {e}" def make_request(self, method: Literal["GET", "POST", "PUT", "DELETE"], url: str, body: Optional[any] = None, content_type: Optional[str] = None): """Get access token and make necessary request call to the api endpoint """ try: access_token = self.access_token_manager.get_access_token() except HTTPError as e: raise "Access Token not available! Authenticate again." full_url = SpotifyApi._root_url + url opts = { "method": method, "headers": { "Authorization": f"Bearer {access_token}", "Content-Type": content_type if content_type else "application/json" }, "body": json.dumps(body) if body and type(body) is not str else body } try: if self.sdk_config: if self.sdk_config.before_request: self.sdk_config.before_request(full_url, opts) if self.sdk_config.fetch: result = self.sdk_config.fetch(full_url, opts) else: result = SpotifyApi.fetch_results(full_url, opts) if self.sdk_config.after_request: self.sdk_config.after_request(full_url, opts, result) return result return SpotifyApi.fetch_results(full_url, opts) except (HTTPError, ValueError, InterruptedError) as e: raise e # handled = self.sdk_config.error_handler.handleErrors(e) # if not handled: # raise Exception("Failed to make request! 
Try again.") # Path: spotify_py_sdk/endpoints/recommendations.py class RecommendationsRequestRequiredArguments: def __init__(self, seed_artists: Optional[list[str]] = None, seed_genres: Optional[list[str]] = None, seed_tracks: Optional[list[str]] = None): self.seed_artists = seed_artists self.seed_genres = seed_genres self.seed_tracks = seed_tracks # Path: tests/endpoints/test_recommendations.py import json import pytest import os from spotify_py_sdk import SpotifyApi from spotify_py_sdk.endpoints.recommendations import RecommendationsRequestRequiredArguments from dotenv import load_dotenv load_dotenv() @pytest.fixture def api():
return AsyncSeq2SeqTrainer if training_args.async_grad else Seq2SeqTrainer
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: kyleliang919/Optimizer-Zoo # Path: optimizer_zoo/Trainer/async_trainer.py class AsyncTrainer(Trainer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.accelerator.sync_gradients = None def training_step(self, model, inputs): # make sure the gradient is not automatically synced with model.no_sync(): model.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss) return loss.detach() / self.args.gradient_accumulation_steps # Path: optimizer_zoo/Trainer/async_trainer.py class AsyncSFTTrainer(SFTTrainer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def training_step(self, model, inputs): # make sure the gradient is not automatically synced with model.no_sync(): model.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss) return loss.detach() / self.args.gradient_accumulation_steps # Path: optimizer_zoo/Trainer/async_trainer.py class AsyncDPOTrainer(DPOTrainer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def training_step(self, model, inputs): # make sure the gradient is not automatically synced with model.no_sync(): model.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss) return loss.detach() / self.args.gradient_accumulation_steps # Path: optimizer_zoo/Trainer/async_trainer.py class AsyncSeq2SeqTrainer(Seq2SeqTrainer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.accelerator.sync_gradients = None def training_step(self, model, inputs): # make sure the gradient is not automatically synced with model.no_sync(): model.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with 
self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss) return loss.detach() / self.args.gradient_accumulation_steps # Path: optimizer_zoo/Trainer/utils.py from transformers import Trainer, Seq2SeqTrainer from trl import SFTTrainer, DPOTrainer from .async_trainer import AsyncTrainer, AsyncSFTTrainer, AsyncDPOTrainer, AsyncSeq2SeqTrainer def create_trainer(training_args): if training_args.task == "pretraining": return AsyncTrainer if training_args.async_grad else Trainer elif training_args.task == "sft": return AsyncSFTTrainer if training_args.async_grad else SFTTrainer elif training_args.task == "dpo": return AsyncDPOTrainer if training_args.async_grad else DPOTrainer elif training_args.task == "seq2seq":
residual_block(channel_size=16),
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: giaminhgist/3D-DAM # Path: lib/model/attention_block.py class SpatialAttention3D(nn.Module): def __init__(self, out_channel=64, kernel_size=3, stride=1, padding=1): super(SpatialAttention3D, self).__init__() self.conv = nn.Conv3d(2, out_channel, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): residual = x avg_out = torch.mean(x, dim=1, keepdim=True) max_out, _ = torch.max(x, dim=1, keepdim=True) x = torch.cat([avg_out, max_out], dim=1) x = self.conv(x) x = self.sigmoid(x) out = x * residual return out # Path: lib/model/attention_block.py class ChannelAttention3D(nn.Module): def __init__(self, in_planes=64, ratio=8): super(ChannelAttention3D, self).__init__() self.avg_pool = nn.AdaptiveAvgPool3d(1) self.max_pool = nn.AdaptiveMaxPool3d(1) self.fc = nn.Sequential(nn.Conv3d(in_planes, in_planes // ratio, 1, bias=False), nn.ReLU(), nn.Conv3d(in_planes // ratio, in_planes, 1, bias=False)) self.sigmoid = nn.Sigmoid() def forward(self, x): residual = x avg_out = self.fc(self.avg_pool(x)) max_out = self.fc(self.max_pool(x)) out = avg_out + max_out return self.sigmoid(out) * residual # Path: lib/model/attention_block.py class residual_block(nn.Module): def __init__(self, channel_size=64): super(residual_block, self).__init__() self.conv = nn.Conv3d(channel_size, channel_size, kernel_size=3, padding=1) self.relu = nn.ReLU() self.bn = nn.BatchNorm3d(channel_size) def forward(self, x): residual = x y = self.conv(x) y = self.bn(y) y = self.relu(y) out = y + residual return out # Path: lib/model/DuoAttention.py import numpy as np import torch from torch import nn from lib.model.attention_block import SpatialAttention3D, ChannelAttention3D, residual_block class DAM(nn.Module): def __init__(self, channels=64): super(DAM, self).__init__() self.sa = SpatialAttention3D(out_channel=channels) self.ca = ChannelAttention3D(in_planes=channels) def forward(self, x): residual = x out = self.ca(x) out = self.sa(out) out = out + residual return out class Duo_Attention(nn.Module): def __init__( self, input_size=(1, 169, 208, 179), num_classes=3, dropout=0 ): super().__init__() self.conv = nn.Sequential( nn.Conv3d(input_size[0], 8, 3, padding=1), nn.BatchNorm3d(8), nn.ReLU(), # nn.MaxPool3d(2, 2), nn.Conv3d(8, 16, 3, padding=1, stride=2), nn.BatchNorm3d(16), nn.ReLU(),
decrypted_message = decrypt_message(encrypted_input.encode(), key)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: itsluminous/EasyEncryption # Path: core.py def generate_key(): """Generate a Fernet key.""" return Fernet.generate_key() # Path: core.py def encrypt_message(message, key): """Encrypt a message using the provided key.""" fernet = Fernet(key) encrypted = fernet.encrypt(message.encode()) return encrypted # Path: core.py def decrypt_message(encrypted_message, key): """Decrypt an encrypted message using the provided key.""" fernet = Fernet(key) decrypted = fernet.decrypt(encrypted_message).decode() return decrypted # Path: core.py def encrypt_file(file_path, key): """Encrypt a file using the provided key.""" try: with open(file_path, 'r', encoding='utf-8') as file: content = file.read() encrypted_content = encrypt_message(content, key) with open(file_path + '.enc', 'wb') as encrypted_file: encrypted_file.write(encrypted_content) print(f"\nFile '{file_path}' encrypted successfully.") except FileNotFoundError: print("\nFile not found.") # Path: core.py def decrypt_file(file_path, key): """Decrypt an encrypted file using the provided key.""" try: with open(file_path, 'rb', encoding='utf-8') as file: encrypted_content = file.read() decrypted_content = decrypt_message(encrypted_content, key) decrypted_file_path = file_path[:-4] # Remove the '.enc' extension with open(decrypted_file_path, 'w', encoding='utf-8') as decrypted_file: decrypted_file.write(decrypted_content) print(f"\nFile '{file_path}' decrypted successfully.") except FileNotFoundError: print("\nFile not found.") except ValueError: print("\nInvalid decryption key or file content.") # Path: script.py from core import generate_key, encrypt_message, decrypt_message, encrypt_file, decrypt_file """ Script providing a user interface for encryption and decryption operations. """ def generate_new_key(): """ Generate a new encryption key. Returns: - bytes: New encryption key. """ key = generate_key() print(f"\nGenerated Key: {key.decode()}") return key def enter_user_key(): """ Prompt user to enter a key. Returns: - bytes: User-entered key. """ print("\nEnter the key:") return input().encode() def encrypt_user_message(key): """ Encrypt a user-entered message. Parameters: - key (bytes): Encryption key. """ if key is None: print("\nPlease generate or enter a key first.") else: print("\nEnter a message to encrypt (press Enter twice to finish):") lines = [] while True: line = input() if not line: break lines.append(line) user_input = '\n'.join(lines) encrypted_message = encrypt_message(user_input, key) print(f"\nEncrypted message: {encrypted_message}") def decrypt_user_message(key): """ Decrypt a user-entered message. Parameters: - key (bytes): Decryption key. """ if key is None: print("\nPlease generate or enter a key first.") else: print("\nEnter the encrypted message (press Enter twice to finish):") lines = [] while True: line = input() if not line: break lines.append(line) encrypted_input = '\n'.join(lines)
response = await resource_not_found(obj, exc)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: gardenifi/server # Path: app/main_app.py INVALID_DATA = "Invalid data: Unable to process the provided data" class GlobalVars: class WifiData(BaseModel): class ValveData(BaseModel): class BleData(BaseModel): def __init__(self): def refresh_set(self): def refresh_set(self, value): async def index(): async def resource_not_found(request: Request, exc: HTTPException): async def read_ble_data(page: int = None): async def write_ble_data(data: BleData): async def discover_wifi(chunked: int = None, page: int = None): async def save_wifi(data: WifiData): async def turn(data: ValveData): async def check_mqtt(): def web_server(): def setup_gpio(): def parse_arguments(): def main(): # Path: app/main_app.py @app.exception_handler(404) async def resource_not_found(request: Request, exc: HTTPException): """Not found error.""" logger.error(f"Request: {request}") return JSONResponse(status_code=404, content={"detail": str(exc.detail)}) # Path: tests/api/resource_not_found_test.py import json import pytest from fastapi.testclient import TestClient from fastapi import HTTPException, Request from fastapi.responses import JSONResponse from app.main_app import app from app.main_app import resource_not_found """MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ client = TestClient(app) scope = {"type": "http", "http_version": "1.1", "method": "GET", "path": "/"} @pytest.fixture(scope="function") async def request_obj(): """Request object creation fixture""" return Request(scope) class TestResourceNotFound: """ Test class for the 'resource_not_found' error handler function. """ @pytest.mark.asyncio async def test_returns_json_response_with_status_code_404_and_detail_of_httpexception(self, obj=request_obj): """ Test for returning a JSONResponse object with status code 404 and the detail of the HTTPException passed as an argument. """ exc = HTTPException(status_code=404, detail="Not found")
Logmanager(args.log)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: xiaoye0x0/pfgo_tg_bot # Path: utils/task/model.py class Task(metaclass=SingletonMeta): def __init__(self, args) -> None: self.conf_file = args.config self.bot_token: str = "" self.pfgo_url: str = "" self.username: str = "" self.password: str = "" self.hide: list = [] self.webhook_url = "" self.webhook_port = "" self.running_host = "" self.running_port = 0 self._init_conf() def _init_conf(self): config = configparser.ConfigParser() config.read(self.conf_file) self.bot_token = config.get("bot", "token") self.pfgo_url = config.get("pfgo", "url") self.username = config.get("pfgo", "username") self.password = config.get("pfgo", "password") self.hide += config.get("pfgo", "hide").split(",") self.webhook_url = config.get("webhook", "webhook_url") self.webhook_port = config.get("webhook", "webhook_port") self.running_host = config.get("webhook", "running_host") self.running_port = int(config.get("webhook", "running_port")) # Path: utils/log.py class Logmanager(metaclass=SingletonMeta): log_list = [] log_list_lock = threading.Lock() path = "./" def __init__(self, path: str) -> None: Logmanager.path = path @classmethod def create_logger(cls, name=None): if name is None: name = "default" logger = logging.getLogger(name) if name not in cls.log_list: with Logmanager.log_list_lock: if name not in cls.log_list: cls.log_list.append(name) logger.setLevel(logging.INFO) logfile = f"{Logmanager.path}/log.log" fh = RotatingFileHandler( logfile, mode="a", maxBytes=1024 * 1024 * 10, backupCount=2, encoding="utf-8", ) formatter = logging.Formatter( "[%(name)s] [%(asctime)s] [%(levelname)s] %(message)s", "%Y%m%d-%H:%M:%S", ) fh.setFormatter(formatter) logger.addHandler(fh) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) fh.close() ch.close() return logger # Path: utils/task/set_args.py import os import argparse from .model import Task from ..log import Logmanager def is_file_exists(file_path) -> bool: r = os.path.exists(file_path) if not r: LOGGER.error(f"文件{file_path}不存在") return r def create_folder_if_not_exists(folder_path): if not folder_path: return if not os.path.exists(folder_path): os.makedirs(folder_path) def parse_command_line_args(): """ -c --config: 配置文件 --log: 日志存放位置 """ parser = argparse.ArgumentParser(description="运行参数") parser.add_argument("--config", "-c", type=str, default="./config.ini", help="配置文件") parser.add_argument("--log", type=str, default="./", help="日志存放文件夹的位置,默认放到当前路径") args = parser.parse_args() # 初始化日志模块 global LOGGER create_folder_if_not_exists(args.log)