prompt: string (length 1.74k–34.3k)
ref: string (length 4–432)
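Each row below pairs a prompt (repository files plus the current file cut off mid-line) with a ref (the single expected next line of code). A minimal sketch of consuming such a dataset and scoring predictions by exact match is shown here, assuming the rows are served as a Hugging Face dataset with exactly these two columns; the dataset ID and the predict_fn callable are hypothetical placeholders, not anything stated on this page.

# Minimal sketch: load a prompt/ref next-line-prediction dataset and
# score a model by exact match on the reference line.
# NOTE: "user/next-line-prediction" is an assumed placeholder ID.
from datasets import load_dataset

ds = load_dataset("user/next-line-prediction", split="train")

def exact_match(predict_fn) -> float:
    """predict_fn maps a prompt string to one predicted line of code."""
    hits = sum(
        predict_fn(row["prompt"]).strip() == row["ref"].strip()
        for row in ds
    )
    return hits / len(ds)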
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: daswer123/rvc-python # Path: rvc_python/infer.py def infer_file( input_path, model_path, index_path = "", device = "cpu:0", f0method = "harvest", opt_path = "out.wav", index_rate = 0.5, filter_radius = 3, resample_sr = 0, rms_mix_rate = 1, protect = 0.33, f0up_key = 0, version = "v2" ): lib_dir = os.path.dirname(os.path.abspath(__file__)) download_rvc_models(lib_dir) config = Config(lib_dir,device) vc = VC(lib_dir,config) vc.get_vc(model_path,version) wav_opt = vc.vc_single( sid=1, input_audio_path=input_path, f0_up_key=f0up_key, f0_method=f0method, file_index=index_path, index_rate=index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect, f0_file="", file_index2="" ) wavfile.write(opt_path, vc.tgt_sr, wav_opt) return opt_path # Path: rvc_python/infer.py def infer_files( dir_path, model_path, paths=[], index_path="", device="cuda:0", f0method="harvest", opt_dir="out/", index_rate=0.5, filter_radius=3, resample_sr=0, rms_mix_rate=1, protect=0.33, f0up_key=0, version="v2", out_format="wav" ): # Create output directory if it does not exist os.makedirs(opt_dir, exist_ok=True) # Determine the files to process audio_files = paths if paths else glob(os.path.join(dir_path, '*.*')) # Initialize some common VC-related variables outside of loop lib_dir = os.path.dirname(os.path.abspath(__file__)) download_rvc_models(lib_dir) config = Config(lib_dir, device) vc = VC(lib_dir, config) vc.get_vc(model_path, version) processed_files = [] for input_audio_path in audio_files: output_filename = os.path.splitext(os.path.basename(input_audio_path))[0] + '.' 
+ out_format opt_path = os.path.join(opt_dir, output_filename) wav_opt = vc.vc_single( sid=1, input_audio_path=input_audio_path, f0_up_key=f0up_key, f0_method=f0method, file_index=index_path, index_rate=index_rate, filter_radius=filter_radius, resample_sr=resample_sr, rms_mix_rate=rms_mix_rate, protect=protect, f0_file="", file_index2="" ) wavfile.write(opt_path, vc.tgt_sr, wav_opt) processed_files.append(opt_path) return processed_files # Path: rvc_python/__main__.py import argparse import sys import os from argparse import ArgumentParser from rvc_python.infer import infer_file,infer_files parser = ArgumentParser(description="RVC inference") # Create a mutually exclusive group for input - only one of them can be provided input_group = parser.add_mutually_exclusive_group(required=True) input_group.add_argument("-i", "--input", type=str, help="Path to input file") input_group.add_argument("-d", "--dir", type=str, help="Directory path containing audio files") parser.add_argument("-pi","--pitch", default=0, type=int, help="Transpose (integer, number of semitones)") parser.add_argument("-ip","--index", type=str, nargs='?', default="", help="Path to index file (optional)") parser.add_argument("-me","--method", type=str, default="harvest", choices=['harvest', "crepe", "rmvpe", 'pm'], help="Pitch extraction algorithm") parser.add_argument("-v","--version", type=str, default="v2", choices=['v1', "v2"], help="Model version") parser.add_argument("-o","--output", type=str, nargs='?', default="out.wav", help="Output path for single file, or output directory for multiple files") parser.add_argument("-mp","--model", type=str, required=True, help="Path to model file") parser.add_argument("-ir","--index_rate", type=float, default=0.5, help="Search feature ratio") parser.add_argument("-de","--device", type=str, default="cuda:0", help="Device to use (e.g., cpu:0, cuda:0)") parser.add_argument("-fr","--filter_radius", type=int, default=3, help="Apply median filtering to the pitch results") parser.add_argument("-rsr","--resample_sr", type=int, default=0, help="Resample rate for the output audio") parser.add_argument("-rmr","--rms_mix_rate", type=float,default=0.25 ,help="Volume envelope mix rate") parser.add_argument("-pr",'--protect' ,type=float,default=0.33 ,help='Protect voiceless consonants and breath sounds') args = parser.parse_args() if args.input: # Single file processing inferred_path = infer_file( input_path=args.input, model_path=args.model, index_path=args.index, device=args.device, f0method=args.method, f0up_key=args.pitch, opt_path=args.output, index_rate=args.index_rate, filter_radius=args.filter_radius, resample_sr=args.resample_sr, rms_mix_rate=args.rms_mix_rate, protect=args.protect, version=args.version ) elif args.dir: # Directory processing
processed_files = infer_files(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: CodeBrugs/ToolSculpt # Path: src/tools/Tool2/tool2_functions.py def process_data(data): """ Procesa los datos utilizando la lógica específica de Tool2. Parameters: - data (str): Datos de entrada. Returns: - result (str): Resultado del procesamiento. """ # Operación específica: Invertir los datos inverted_data = data[::-1] # Operación específica: Aplicar una transformación especial processed_data = inverted_data.upper() # Convertir a mayúsculas como ejemplo result = f"Tool2 aplicó una transformación especial: {processed_data}" return result # Path: src/tools/Tool2/tool2_functions.py def analyze_data(data): """ Analiza los datos utilizando la lógica específica de Tool2. Parameters: - data (str): Datos de entrada. Returns: - analysis (str): Resultado del análisis. """ # Operación específica: Contar la cantidad de caracteres alfabéticos alphabet_count = sum(c.isalpha() for c in data) # Operación específica: Realizar un análisis basado en la cantidad de caracteres alfabéticos if alphabet_count > len(data) // 2: analysis = "Tool2 encontró más de la mitad de los caracteres como alfabéticos." else: analysis = "Tool2 encontró menos de la mitad de los caracteres como alfabéticos." return analysis # Path: src/tools/Tool2/tool2_functions.py def perform_additional_task(): """ Realiza una tarea adicional específica de Tool2. Returns: - task_result (str): Resultado de la tarea. """ # Operación específica: Realizar una tarea adicional task_result = "Tool2 realizó una tarea adicional con éxito." return task_result # Path: tests/test_tools/test_tool2.py import unittest from src.tools.Tool2.tool2_functions import process_data, analyze_data, perform_additional_task # tests/test_tools/test_tool2.py class TestTool2Functions(unittest.TestCase): def test_process_data(self): # Prueba para la función process_data input_data = "example_data" result = process_data(input_data) self.assertEqual(result, "Tool2 processed the data: A_SPECIAL_TRANSFORMATION") def test_analyze_data(self): # Prueba para la función analyze_data input_data = "example_data" result = analyze_data(input_data) self.assertEqual(result, "Tool2 analyzed the data: example_data. Alphabetic characters: 11") def test_perform_additional_task(self): # Prueba para la función perform_additional_task
result = perform_additional_task()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: run-llama/rags # Path: st_utils.py def add_sidebar() -> None: """Add sidebar.""" with st.sidebar: agent_registry = cast(AgentCacheRegistry, st.session_state.agent_registry) st.session_state.cur_agent_ids = agent_registry.get_agent_ids() choices = ["Create a new agent"] + st.session_state.cur_agent_ids # by default, set index to 0. if value is in selected_id, set index to that index = 0 if "selected_id" in st.session_state.keys(): if st.session_state.selected_id is not None: index = choices.index(st.session_state.selected_id) # display buttons st.radio( "Agents", choices, index=index, on_change=update_selected_agent, key="agent_selector", ) # Path: st_utils.py def get_current_state() -> CurrentSessionState: """Get current state. This includes current state stored in session state and derived from it, e.g. - agent registry - selected agent - selected cache - agent builder - builder agent """ # get agent registry agent_registry = AgentCacheRegistry(str(AGENT_CACHE_DIR)) if "agent_registry" not in st.session_state.keys(): st.session_state.agent_registry = agent_registry if "cur_agent_ids" not in st.session_state.keys(): st.session_state.cur_agent_ids = agent_registry.get_agent_ids() if "selected_id" not in st.session_state.keys(): st.session_state.selected_id = None # set selected cache if doesn't exist if ( "selected_cache" not in st.session_state.keys() or st.session_state.selected_cache is None ): # update selected cache if st.session_state.selected_id is None: st.session_state.selected_cache = None else: # load agent from directory agent_registry = cast(AgentCacheRegistry, st.session_state.agent_registry) agent_cache = agent_registry.get_agent_cache(st.session_state.selected_id) st.session_state.selected_cache = agent_cache # set builder agent / agent builder if ( "builder_agent" not in st.session_state.keys() or st.session_state.builder_agent is None or "agent_builder" not in st.session_state.keys() or st.session_state.agent_builder is None ): if ( "selected_cache" in st.session_state.keys() and st.session_state.selected_cache is not None ): # create builder agent / tools from selected cache builder_agent, agent_builder = load_meta_agent_and_tools( cache=st.session_state.selected_cache, agent_registry=st.session_state.agent_registry, # NOTE: we will probably generalize this later into different # builder configs is_multimodal=get_cached_is_multimodal(), ) else: # create builder agent / tools from new cache builder_agent, agent_builder = load_meta_agent_and_tools( agent_registry=st.session_state.agent_registry, is_multimodal=get_is_multimodal(), ) st.session_state.builder_agent = builder_agent st.session_state.agent_builder = agent_builder return CurrentSessionState( agent_registry=st.session_state.agent_registry, selected_id=st.session_state.selected_id, selected_cache=st.session_state.selected_cache, agent_builder=st.session_state.agent_builder, cache=st.session_state.agent_builder.cache, builder_agent=st.session_state.builder_agent, ) # Path: core/utils.py def get_image_and_text_nodes( nodes: List[NodeWithScore], ) -> Tuple[List[NodeWithScore], List[NodeWithScore]]: image_nodes = [] text_nodes = [] for res_node in nodes: if isinstance(res_node.node, ImageNode): 
image_nodes.append(res_node) else: text_nodes.append(res_node) return image_nodes, text_nodes # Path: pages/3_🤖_Generated_RAG_Agent.py import streamlit as st import pandas as pd from st_utils import add_sidebar, get_current_state from core.utils import get_image_and_text_nodes from llama_index.schema import MetadataMode from llama_index.chat_engine.types import AGENT_CHAT_RESPONSE_TYPE from typing import Dict, Optional """Streamlit page showing builder config.""" #################### #### STREAMLIT ##### #################### st.set_page_config( page_title="Generated RAG Agent", page_icon="🦙", layout="centered", initial_sidebar_state="auto", menu_items=None, ) st.title("Generated RAG Agent") current_state = get_current_state() add_sidebar() if ( "agent_messages" not in st.session_state.keys() ): # Initialize the chat messages history st.session_state.agent_messages = [ {"role": "assistant", "content": "Ask me a question!"} ] def display_sources(response: AGENT_CHAT_RESPONSE_TYPE) -> None:
image_nodes, text_nodes = get_image_and_text_nodes(response.source_nodes)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: open-mmlab/Amphion # Path: modules/activation_functions/gated_activation_unit.py class GaU(nn.Module): r"""Gated Activation Unit (GaU) proposed in `Gated Activation Units for Neural Networks <https://arxiv.org/pdf/1606.05328.pdf>`_. Args: channels: number of input channels. kernel_size: kernel size of the convolution. dilation: dilation rate of the convolution. d_context: dimension of context tensor, None if don't use context. """ def __init__( self, channels: int, kernel_size: int = 3, dilation: int = 1, d_context: int = None, ): super().__init__() self.context = d_context self.conv = Conv1d( channels, channels * 2, kernel_size, dilation=dilation, padding=dilation * (kernel_size - 1) // 2, ) if self.context: self.context_proj = Conv1d(d_context, channels * 2, 1) def forward(self, x: torch.Tensor, context: torch.Tensor = None): r"""Calculate forward propagation. Args: x: input tensor with shape [B, C, T]. context: context tensor with shape [B, ``d_context``, T], default to None. """ h = self.conv(x) if self.context: h = h + self.context_proj(context) h1, h2 = h.chunk(2, 1) h = torch.tanh(h1) * torch.sigmoid(h2) return h # Path: modules/general/utils.py def Conv1d(*args, **kwargs): r"""Wrapper of ``nn.Conv1d`` with kaiming_normal_ initialization.""" layer = nn.Conv1d(*args, **kwargs) nn.init.kaiming_normal_(layer.weight) return layer # Path: modules/diffusion/bidilconv/residual_block.py import math import torch import torch.nn as nn from modules.activation_functions import GaU from modules.general.utils import Conv1d # Copyright (c) 2023 Amphion. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. class ResidualBlock(nn.Module): r"""Residual block with dilated convolution, main portion of ``BiDilConv``. Args: channels: The number of channels of input and output. kernel_size: The kernel size of dilated convolution. dilation: The dilation rate of dilated convolution. d_context: The dimension of content encoder output, None if don't use context. """ def __init__( self, channels: int = 256, kernel_size: int = 3, dilation: int = 1, d_context: int = None, ): super().__init__() self.context = d_context
self.gau = GaU(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ise-uiuc/magicoder # Path: src/magicoder/decontamination/benchmark_data.py FILTER_OUT = {k: v() for k, v in LAZY_FILTER_OUT.items()} # Path: src/magicoder/decontamination/utils.py def add_dict(dict1: dict, dict2: dict) -> None: """ Add the values of dict2 to dict1. All values must be int, float or dictionaries that also verify this condition. Will modify dict1 and return None """ for key, value in dict2.items(): if isinstance(value, (int, float)): if key not in dict1: dict1[key] = 0 dict1[key] += value elif isinstance(value, dict): if key not in dict1: dict1[key] = {} assert isinstance(dict1[key], dict) add_dict(dict1[key], value) else: raise ValueError(f"Invalid type for key/value {key}: {value}") # Path: src/magicoder/decontamination/utils.py def shard_dataset(ds, shard_size, output_dir, num_proc): if ds._indices is not None: dataset_nbytes = ds.data.nbytes * len(ds._indices) / len(ds.data) else: dataset_nbytes = ds.data.nbytes num_shards = int(dataset_nbytes / shard_size) + 1 print(f"Number of shards: {num_shards}") print("sharding the dataset") t_start = time.time() shards = ( ds.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards) ) # use f"{OUT_PATH}/data/train-{index:05d}-of-{num_shards:05d}.json" instead for json files filenames = ( f"{output_dir}/train-{index:05d}-of-{num_shards:05d}.parquet" for index in range(num_shards) ) with Pool(num_proc) as p: list( tqdm( p.imap_unordered(save_shard, zip(filenames, shards), chunksize=4), total=num_shards, ) ) print(f"Time to save dataset: {time.time()-t_start:.2f}") # Path: src/magicoder/decontamination/find_substrings.py import argparse import json import os import shutil from copy import deepcopy from glob import glob from pathlib import Path from datasets import load_dataset from magicoder.utils import write_jsonl from .benchmark_data import FILTER_OUT from .utils import add_dict, shard_dataset # type: ignore """Migrated from: https://github.com/bigcode-project/bigcode-dataset. License: Apache 2.0""" SHARD_SIZE = 1000 << 20 # 1GB LANGUAGE_COL = "lang" # LANGUAGES = ["Python", "Java", "JavaScript"] def dump_benchmarks(file_path: str): """ Dump the dictionary of benchmark samples that are filtered out """ with open(file_path, "w") as f: json.dump(FILTER_OUT, f, indent=2) def filter_reason_to_benchmark_name(filter_reason: str): assert filter_reason.endswith("_match") return filter_reason[:-6] def benchmark_name_to_filter_reason(benchmark_name: str): return f"{benchmark_name}_match" def update_benchmark_dict( filter_out: dict, benchmark_cache: str, excluded_data_cache: str ): """ Iterates on current benchmark-samples. 
If a sample is found in the cached benchmark-samples, it is removed (it does not need to be searched), and the corresponding data-samples from the cache are added to `exclude_data` Returns: - `updated`: an updated benchmark dict where samples from the cache are removed (they do not need to be searched anymore) - `exclude_data`: a list of files to remove from the dataset """ updated = deepcopy(filter_out) exclude_data = [] with open(benchmark_cache) as f: benchmark_cache = json.load(f) with open(excluded_data_cache) as f: excluded_data_cache = json.load(f) for bench, samples in filter_out.items(): for bench_sample in samples: # Benchmark-sample was found in cache if bench in benchmark_cache and bench_sample in benchmark_cache[bench]: # No need to search for this sample in the dataset updated[bench].remove(bench_sample) # Corresponding data-samples will be excluded from the dataset. exclude_data += [ data_sample for data_sample in excluded_data_cache if data_sample["filter_reason"] == benchmark_name_to_filter_reason(bench) and data_sample["matched_substring"] == bench_sample ] print("After loading cache, will search for:") for benchmark, values in updated.items(): print(f" num strings from {benchmark}: {len(values)}") # Remove empty benchmarks updated = {key: value for key, value in updated.items() if len(value) > 0} return updated, exclude_data def find_substrings(data, columns, filter_out, return_matched=False): """ filter_out: Dict[str, List[str]] mapping from benchmark name to list of strings that need to be filtered-out. Return True, None if the file should be included in the dataset. Otherwise return False and some metadata about the file excluded """ content = "\n\n".join([data[col].lower() for col in columns]) # For each substring, try to find it in the file (case insensitive) for benchmark, substrings in filter_out.items(): for substring in substrings: if substring.lower() in content: if return_matched: return False, benchmark_name_to_filter_reason(benchmark), substring else: return False, benchmark_name_to_filter_reason(benchmark) # Return True, None if none of the substrings was found if return_matched: return True, None, None else: return True, None def aggregate_meta(tmp_meta_dir: str): res = {} for file in glob(f"{tmp_meta_dir}/*-meta.json"): with open(file, "r") as f: meta = json.load(f)
add_dict(res, meta)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: KwaiKEG/KwaiAgents # Path: kwaiagents/config.py class Config(object): def __init__(self) -> None: """Initialize the Config class""" self.fast_llm_model = "gpt-3.5-turbo" self.smart_llm_model = "gpt-4" self.use_local_llm = False self.local_llm_host = "localhost" self.local_llm_port = 8888 self.browse_chunk_max_length = 4096 self.browse_summary_max_token = 300 self.selenium_web_browser = "chrome" self.llm_max_retries = 5 self.temperature = 1.0 self.max_tokens_num = 4096 self.chain_logger = ChainMessageLogger() def __str__(self): s = "============ CONFIG ============\n" for key, val in self.__dict__.items(): s += "· " + key.upper() + ":\t" + str(val) + '\n' return s def to_json_file(self, fname): with open(fname, "w") as f: json.dump({k:v for k, v in self.__dict__.items() if k not in ["chain_logger"]},f, ensure_ascii=False, indent=2) def set_chain_logger(self, chain_logger): self.chain_logger = chain_logger # Path: kwaiagents/tools/base.py class BaseResult(object): def __init__(self, json_data): self.json_data = json_data def __str__(self): return pprint.pformat(self.json_data) @property def answer(self): return "" @property def answer_md(self): return self.answer @property def urls(self): return list() @property def prompt_responses(self): return list() # Path: kwaiagents/tools/base.py class BaseTool(object): def __init__(self, *args, **kwargs): pass def __call__(self): return BaseResult({}) # Path: kwaiagents/tools/timedelta.py from datetime import datetime from dateutil.relativedelta import relativedelta from kwaiagents.config import Config from kwaiagents.tools.base import BaseResult, BaseTool #!/usr/bin/env python # -*- coding: utf-8 -*- # Author: PAN Leyi # Email: panleyi@kuaishou.com class TimeDeltaResult(BaseResult): @property def answer(self): item = self.json_data rst = "" for key in item.keys(): rst += f'{key}: {item[key]}\n' return rst
class TimeDeltaTool(BaseTool):
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: EnVision-Research/LucidDreamer # Path: utils/graphics_utils.py def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0): Rt = np.zeros((4, 4)) Rt[:3, :3] = R.transpose() Rt[:3, 3] = t Rt[3, 3] = 1.0 C2W = np.linalg.inv(Rt) cam_center = C2W[:3, 3] cam_center = (cam_center + translate) * scale C2W[:3, 3] = cam_center Rt = np.linalg.inv(C2W) return np.float32(Rt) # Path: utils/graphics_utils.py def focal2fov(focal, pixels): return 2*math.atan(pixels/(2*focal)) # Path: utils/graphics_utils.py def fov2focal(fov, pixels): return pixels / (2 * math.tan(fov / 2)) # Path: utils/pointe_utils.py def init_from_pointe(prompt): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print('creating base model...') base_name = 'base40M-textvec' base_model = model_from_config(MODEL_CONFIGS[base_name], device) base_model.eval() base_diffusion = diffusion_from_config(DIFFUSION_CONFIGS[base_name]) print('creating upsample model...') upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device) upsampler_model.eval() upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample']) print('downloading base checkpoint...') base_model.load_state_dict(load_checkpoint(base_name, device)) print('downloading upsampler checkpoint...') upsampler_model.load_state_dict(load_checkpoint('upsample', device)) sampler = PointCloudSampler( device=device, models=[base_model, upsampler_model], diffusions=[base_diffusion, upsampler_diffusion], num_points=[1024, 4096 - 1024], aux_channels=['R', 'G', 'B'], guidance_scale=[3.0, 0.0], model_kwargs_key_filter=('texts', ''), # Do not condition the upsampler at all ) # Produce a sample from the model. 
samples = None for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(texts=[prompt]))): samples = x pc = sampler.output_to_point_clouds(samples)[0] xyz = pc.coords rgb = np.zeros_like(xyz) rgb[:,0],rgb[:,1],rgb[:,2] = pc.channels['R'],pc.channels['G'],pc.channels['B'] return xyz,rgb # Path: utils/sh_utils.py def SH2RGB(sh): return sh * C0 + 0.5 # Path: utils/general_utils.py def inverse_sigmoid_np(x): return np.log(x/(1-x)) # Path: scene/gaussian_model.py class GaussianModel: def setup_functions(self): def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): def __init__(self, sh_degree : int): def capture(self): def restore(self, model_args, training_args): def get_scaling(self): def get_rotation(self): def get_xyz(self): def get_background(self): def get_features(self): def get_opacity(self): def get_covariance(self, scaling_modifier = 1): def oneupSHdegree(self): def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): def training_setup(self, training_args): def update_learning_rate(self, iteration): def update_feature_learning_rate(self, iteration): def update_rotation_learning_rate(self, iteration): def update_scaling_learning_rate(self, iteration): def construct_list_of_attributes(self): def save_ply(self, path): def reset_opacity(self): def load_ply(self, path): def replace_tensor_to_optimizer(self, tensor, name): def _prune_optimizer(self, mask): def prune_points(self, mask): def cat_tensors_to_optimizer(self, tensors_dict): def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation): def densify_and_split(self, grads, grad_threshold, scene_extent, N=2): def densify_and_clone(self, grads, grad_threshold, scene_extent): def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size): def add_densification_stats(self, viewspace_point_tensor, update_filter): L = build_scaling_rotation(scaling_modifier * scaling, rotation) # Path: scene/dataset_readers.py import os import sys import torch import random import torch.nn.functional as F import numpy as np import json from PIL import Image from typing import NamedTuple from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from utils.pointe_utils import init_from_pointe from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from utils.general_utils import inverse_sigmoid_np from scene.gaussian_model import BasicPointCloud # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class RandCameraInfo(NamedTuple): uid: int R: np.array T: np.array FovY: np.array FovX: np.array width: int height: int delta_polar : np.array delta_azimuth : np.array delta_radius : np.array class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: VRSEN/agency-swarm # Path: agency_swarm/tools/base_tool.py class BaseTool(OpenAISchema, ABC): def __init__(self, **kwargs): super().__init__(**kwargs) # # Exclude 'run' method from Pydantic model fields # self.model_fields.pop("run", None) @abstractmethod def run(self, **kwargs): pass # Path: agency_swarm/tools/browsing/util/selenium.py def get_web_driver(): try: from selenium import webdriver from selenium.webdriver.chrome.service import Service as ChromeService except ImportError: print("Selenium not installed. Please install it with pip install selenium") raise ImportError try: from webdriver_manager.chrome import ChromeDriverManager except ImportError: print("webdriver_manager not installed. Please install it with pip install webdriver-manager") raise ImportError try: from selenium_stealth import stealth except ImportError: print("selenium_stealth not installed. Please install it with pip install selenium-stealth") raise ImportError global wd if wd: return wd global selenium_config chrome_profile_path = selenium_config.get("chrome_profile_path", None) profile_directory = None user_data_dir = None if isinstance(chrome_profile_path, str) and os.path.exists(chrome_profile_path): profile_directory = os.path.split(chrome_profile_path)[-1].strip("\\").rstrip("/") user_data_dir = os.path.split(chrome_profile_path)[0].strip("\\").rstrip("/") print(f"Using Chrome profile: {profile_directory}") print(f"Using Chrome user data dir: {user_data_dir}") print(f"Using Chrome profile path: {chrome_profile_path}") chrome_options = webdriver.ChromeOptions() # Removed headless and other options for debugging purposes chrome_driver_path = ChromeDriverManager().install() if selenium_config.get("headless", False): chrome_options.add_argument('--headless') chrome_options.add_argument("--window-size=960,1080") chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--disable-dev-shm-usage') chrome_options.add_argument("--disable-extensions") chrome_options.add_argument("--disable-popup-blocking") chrome_options.add_argument("--disable-web-security") chrome_options.add_argument("--allow-running-insecure-content") chrome_options.add_argument("--disable-gpu") chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"]) chrome_options.add_experimental_option('useAutomationExtension', False) if user_data_dir and profile_directory: chrome_options.add_argument(f"user-data-dir={user_data_dir}") chrome_options.add_argument(f"profile-directory={profile_directory}") try: wd = webdriver.Chrome(service=ChromeService(chrome_driver_path), options=chrome_options) print("WebDriver initialized successfully.") # Print the actual profile path being used if wd.capabilities['chrome']['userDataDir']: print(f"Profile path in use: {wd.capabilities['chrome']['userDataDir']}") except Exception as e: print(f"Error initializing WebDriver: {e}") raise stealth( wd, languages=["en-US", "en"], vendor="Google Inc.", platform="Win32", webgl_vendor="Intel Inc.", renderer="Intel Iris OpenGL Engine", fix_hairline=True, ) # wd.set_window_size(960, 1080) wd.implicitly_wait(3) return wd # Path: agency_swarm/tools/browsing/util/selenium.py def set_web_driver(new_wd): global wd wd = 
remove_highlight_and_labels(wd) wd = new_wd # Path: agency_swarm/tools/browsing/GoBack.py import time from agency_swarm.tools import BaseTool from agency_swarm.tools.browsing.util.selenium import get_web_driver, set_web_driver class GoBack(BaseTool): """ This tool allows you to go back 1 page in the browser history. Use it in case of a mistake or if a page shows you unexpected content. """ def run(self):
wd = get_web_driver()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: resemble-ai/resemble-enhance # Path: resemble_enhance/melspec.py class MelSpectrogram(nn.Module): def __init__(self, hp: HParams): """ Torch implementation of Resemble's mel extraction. Note that the values are NOT identical to librosa's implementation due to floating point precisions. """ super().__init__() self.hp = hp self.melspec = TorchMelSpectrogram( hp.wav_rate, n_fft=hp.n_fft, win_length=hp.win_size, hop_length=hp.hop_size, f_min=0, f_max=hp.wav_rate // 2, n_mels=hp.num_mels, power=1, normalized=False, # NOTE: Folowing librosa's default. pad_mode="constant", norm="slaney", mel_scale="slaney", ) self.register_buffer("stft_magnitude_min", torch.FloatTensor([hp.stft_magnitude_min])) self.min_level_db = 20 * np.log10(hp.stft_magnitude_min) self.preemphasis = hp.preemphasis self.hop_size = hp.hop_size def forward(self, wav, pad=True): """ Args: wav: [B, T] """ device = wav.device if wav.is_mps: wav = wav.cpu() self.to(wav.device) if self.preemphasis > 0: wav = torch.nn.functional.pad(wav, [1, 0], value=0) wav = wav[..., 1:] - self.preemphasis * wav[..., :-1] mel = self.melspec(wav) mel = self._amp_to_db(mel) mel_normed = self._normalize(mel) assert not pad or mel_normed.shape[-1] == 1 + wav.shape[-1] // self.hop_size # Sanity check mel_normed = mel_normed.to(device) return mel_normed # (M, T) def _normalize(self, s, headroom_db=15): return (s - self.min_level_db) / (-self.min_level_db + headroom_db) def _amp_to_db(self, x): return x.clamp_min(self.hp.stft_magnitude_min).log10() * 20 # Path: resemble_enhance/denoiser/hparams.py class HParams(HParamsBase): batch_size_per_gpu: int = 128 distort_prob: float = 0.5 # Path: resemble_enhance/denoiser/unet.py class UNet(nn.Module): def __init__(self, input_dim, output_dim, hidden_dim=16, num_blocks=4, num_middle_blocks=2): super().__init__() self.input_dim = input_dim self.output_dim = output_dim self.input_proj = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) self.encoder_blocks = nn.ModuleList( [ UNetBlock(input_dim=hidden_dim * 2**i, output_dim=hidden_dim * 2 ** (i + 1), scale_factor=0.5) for i in range(num_blocks) ] ) self.middle_blocks = nn.ModuleList( [UNetBlock(input_dim=hidden_dim * 2**num_blocks) for _ in range(num_middle_blocks)] ) self.decoder_blocks = nn.ModuleList( [ UNetBlock(input_dim=hidden_dim * 2 ** (i + 1), output_dim=hidden_dim * 2**i, scale_factor=2) for i in reversed(range(num_blocks)) ] ) self.head = nn.Sequential( nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1), nn.GELU(), nn.Conv2d(hidden_dim, output_dim, 1), ) @property def scale_factor(self): return 2 ** len(self.encoder_blocks) def pad_to_fit(self, x): """ Args: x: (b c h w), input Returns: x: (b c h' w'), padded input """ hpad = (self.scale_factor - x.shape[2] % self.scale_factor) % self.scale_factor wpad = (self.scale_factor - x.shape[3] % self.scale_factor) % self.scale_factor return F.pad(x, (0, wpad, 0, hpad)) def forward(self, x): """ Args: x: (b c h w), input Returns: o: (b c h w), output """ shape = x.shape x = self.pad_to_fit(x) x = self.input_proj(x) s_list = [] for block in self.encoder_blocks: x, s = block(x) s_list.append(s) for block in self.middle_blocks: x, _ = block(x) for block, s in zip(self.decoder_blocks, reversed(s_list)): 
x, _ = block(x, s) x = self.head(x) x = x[..., : shape[2], : shape[3]] return x def test(self, shape=(3, 512, 256)): import ptflops macs, params = ptflops.get_model_complexity_info( self, shape, as_strings=True, print_per_layer_stat=True, verbose=True, ) print(f"macs: {macs}") print(f"params: {params}") # Path: resemble_enhance/denoiser/denoiser.py import logging import torch import torch.nn.functional as F from torch import Tensor, nn from ..melspec import MelSpectrogram from .hparams import HParams from .unet import UNet logger = logging.getLogger(__name__) def _normalize(x: Tensor) -> Tensor: return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7) class Denoiser(nn.Module): @property def stft_cfg(self) -> dict: hop_size = self.hp.hop_size return dict(hop_length=hop_size, n_fft=hop_size * 4, win_length=hop_size * 4) @property def n_fft(self): return self.stft_cfg["n_fft"] @property def eps(self): return 1e-7 def __init__(self, hp: HParams): super().__init__() self.hp = hp self.net = UNet(input_dim=3, output_dim=3)
self.mel_fn = MelSpectrogram(hp)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: PKU-YuanGroup/Chat-UniVi # Path: ChatUniVi/constants.py DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>" # Path: ChatUniVi/constants.py DEFAULT_IM_START_TOKEN = "<im_start>" # Path: ChatUniVi/constants.py DEFAULT_IM_END_TOKEN = "<im_end>" # Path: ChatUniVi/model/builder.py import os import shutil import torch from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig from ChatUniVi.model import * from ChatUniVi.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from accelerate import init_empty_weights, load_checkpoint_and_dispatch from transformers import AutoConfig, AutoModelForCausalLM from huggingface_hub import hf_hub_download from peft import PeftModel from peft import PeftModel def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto"): kwargs = {"device_map": device_map} if load_8bit: kwargs['load_in_8bit'] = True elif load_4bit: kwargs['load_in_4bit'] = True kwargs['quantization_config'] = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4' ) else: kwargs['torch_dtype'] = torch.float16 if 'chatunivi' in model_name.lower(): # Load ChatUniVi model if 'lora' in model_name.lower() and model_base is not None: lora_cfg_pretrained = AutoConfig.from_pretrained(model_path) tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) print('Loading ChatUniVi from base model...') model = ChatUniViLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs) token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features if model.lm_head.weight.shape[0] != token_num: model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype)) model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype)) print('Loading additional ChatUniVi weights...') if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')): non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu') else: # this is probably from HF Hub def load_from_hf(repo_id, filename, subfolder=None): cache_file = hf_hub_download( repo_id=repo_id, filename=filename, subfolder=subfolder) return torch.load(cache_file, map_location='cpu') non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin') non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()} if any(k.startswith('model.model.') for k in non_lora_trainables): non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()} model.load_state_dict(non_lora_trainables, strict=False) print('Loading LoRA weights...') model = PeftModel.from_pretrained(model, model_path) print('Merging LoRA weights...') model = model.merge_and_unload() print('Model is loaded...') elif model_base is not None: # this may be mm projector only print('Loading ChatUniVi from base model...') tokenizer = AutoTokenizer.from_pretrained(model_base, 
use_fast=False) cfg_pretrained = AutoConfig.from_pretrained(model_path) model = ChatUniViLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs) mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu') mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()} model.load_state_dict(mm_projector_weights, strict=False) else: tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) else: # Load language model if model_base is not None: # PEFT model tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto") print(f"Loading LoRA weights from {model_path}") model = PeftModel.from_pretrained(model, model_path) print(f"Merging weights") model = model.merge_and_unload() print('Convert to FP16...') model.to(torch.float16) else: use_fast = False tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) image_processor = None if 'chatunivi' in model_name.lower(): mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False) mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True) if mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: tatsu-lab/gpt_paper_assistant # Path: arxiv_scraper.py class Paper: # paper class should track the list of authors, paper title, abstract, arxiv id authors: List[str] title: str abstract: str arxiv_id: str # add a hash function using arxiv_id def __hash__(self): return hash(self.arxiv_id) # Path: arxiv_scraper.py class EnhancedJSONEncoder(json.JSONEncoder): def default(self, o): if dataclasses.is_dataclass(o): return dataclasses.asdict(o) return super().default(o) # Path: filter_papers.py import configparser import dataclasses import json import os import re import retry from collections import defaultdict from typing import List from openai import OpenAI from tqdm import tqdm from arxiv_scraper import Paper from arxiv_scraper import EnhancedJSONEncoder def filter_by_author(all_authors, papers, author_targets, config): # filter and parse the papers selected_papers = {} # pass to output all_papers = {} # dict for later filtering sort_dict = {} # dict storing key and score # author based selection for paper in papers: all_papers[paper.arxiv_id] = paper if config["FILTERING"].getboolean("author_match"): for author in paper.authors: if author in all_authors: for alias in all_authors[author]: if alias["authorId"] in author_targets: selected_papers[paper.arxiv_id] = { **dataclasses.asdict(paper), **{"COMMENT": "Author match"}, } sort_dict[paper.arxiv_id] = float( config["SELECTION"]["author_match_score"] ) break return selected_papers, all_papers, sort_dict def filter_papers_by_hindex(all_authors, papers, config): # filters papers by checking to see if there's at least one author with > hcutoff hindex paper_list = [] for paper in papers: max_h = 0 for author in paper.authors: if author in all_authors: max_h = max( max_h, max([alias["hIndex"] for alias in all_authors[author]]) ) if max_h >= float(config["FILTERING"]["hcutoff"]): paper_list.append(paper) return paper_list def calc_price(model, usage): if model == "gpt-4-1106-preview": return (0.01 * usage.prompt_tokens + 0.03 * usage.completion_tokens) / 1000.0 if model == "gpt-4": return (0.03 * usage.prompt_tokens + 0.06 * usage.completion_tokens) / 1000.0 if (model == "gpt-3.5-turbo") or (model == "gpt-3.5-turbo-1106"): return (0.0015 * usage.prompt_tokens + 0.002 * usage.completion_tokens) / 1000.0 @retry.retry(tries=3, delay=2) def call_chatgpt(full_prompt, openai_client, model, num_samples): return openai_client.chat.completions.create( model=model, messages=[{"role": "user", "content": full_prompt}], temperature=0.0, n=int(num_samples), seed=0, ) def run_and_parse_chatgpt(full_prompt, openai_client, config): # just runs the chatgpt prompt, tries to parse the resulting JSON completion = call_chatgpt( full_prompt, openai_client, config["SELECTION"]["model"], config["FILTERING"]["num_samples"], ) json_dicts = defaultdict(list) for choice in completion.choices: out_text = choice.message.content out_text = re.sub("```jsonl\n", "", out_text) out_text = re.sub("```", "", out_text) out_text = re.sub(r"\n+", "\n", out_text) out_text = re.sub("},", "}", out_text).strip() # split out_text line by line and parse each as a json. 
for line in out_text.split("\n"): # try catch block to attempt to parse json try: loaded_output = json.loads(line) json_dicts[loaded_output["ARXIVID"]].append(loaded_output) except Exception as ex: if config["OUTPUT"].getboolean("debug_messages"): print("Exception happened " + str(ex)) print("Failed to parse LM output as json") print(out_text) print("RAW output") print(completion.choices[0].message.content) continue all_dict = [] for id, json_list in json_dicts.items(): rel_score = sum([float(jdict["RELEVANCE"]) for jdict in json_list]) / float( len(json_list) ) nov_score = sum([float(jdict["NOVELTY"]) for jdict in json_list]) / float( len(json_list) ) new_dict = { "ARXIVID": json_list[0]["ARXIVID"], "COMMENT": json_list[0]["COMMENT"], "RELEVANCE": rel_score, "NOVELTY": nov_score, } all_dict.append(new_dict) return all_dict, calc_price(config["SELECTION"]["model"], completion.usage)
def paper_to_string(paper_entry: Paper) -> str:
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: BobaZooba/xllm # Path: src/xllm/enums.py class General: class Transformers: class Registry: class Datasets: class Collators: class Trainers: class Experiments: class EnvironmentVariables: class LogLevel: # Path: src/xllm/datasets/registry.py # Path: src/xllm/datasets/soda.py class SodaDataset(BaseDataset): HEADER_KEY = "header" DIALOG_KEY = "dialog" _HF_DATASET_ID = "allenai/soda" def __init__(self, data: List[RawSample], header_drop_probability: float = 0.05): super().__init__(data=data) self.header_drop_probability = header_drop_probability @classmethod def get_data(cls, config: Config) -> Optional[Tuple[List[RawSample], Optional[List[RawSample]]]]: soda_dataset = datasets.load_dataset(cls._HF_DATASET_ID) parsed_data: Dict[str, List[RawSample]] = dict() known_indices = set() for split in ["train", "test"]: parsed_data[split] = list() for sample in tqdm(soda_dataset[split], desc=f"Parsing SODA {split}"): index = sample.get("original_index") if index in known_indices: continue parsed_sample = { cls.HEADER_KEY: sample.get("narrative"), cls.DIALOG_KEY: [ f"{speaker}: {phrase}" for speaker, phrase in zip(sample.get("speakers"), sample.get("dialogue")) ], } parsed_data[split].append(parsed_sample) known_indices.add(index) train = parsed_data["train"] valid = parsed_data["test"] return train, valid def get_sample(self, index: int) -> RawSample: sample = self.data[index] dialog = sample[self.DIALOG_KEY] phrases = list() if not isinstance(dialog, list): raise ValueError(f"{self.DIALOG_KEY} of sample is not a list: {type(dialog)}") for phrase in dialog: if isinstance(phrase, str): phrases.append(phrase) if self.HEADER_KEY in sample: header = sample[self.HEADER_KEY] is_drop_header = np.random.rand() <= self.header_drop_probability if not is_drop_header and isinstance(header, str): phrases.insert(0, header) sample = {enums.General.text_parts: [phrase.replace("\n", " ").replace("\r", " ") for phrase in phrases]} return sample # Path: tests/helpers/dummy_data.py DATA = [ { enums.General.text_parts: [ "Person 1: Hello", "Person 2: It's me", "Person 1: I was wondering", ] }, { enums.General.text_parts: [ "You are a sith lord", "Kenobi: Hello there", "General Grievous: General Kenobi", ] }, ] # Path: tests/unit/datasets/test_registry.py from src.xllm import enums from src.xllm.datasets.registry import datasets_registry from src.xllm.datasets.soda import SodaDataset from tests.helpers.dummy_data import DATA # Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def test_get_soda_dataset() -> None: dataset_cls = datasets_registry.get(key=enums.Datasets.soda) dataset = dataset_cls(data=DATA)
assert isinstance(dataset, SodaDataset)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: banodoco/Steerable-Motion # Path: imports/AdvancedControlNet/control.py class LatentKeyframeImport: def __init__(self, batch_index: int, strength: float) -> None: self.batch_index = batch_index self.strength = strength # Path: imports/AdvancedControlNet/control.py class LatentKeyframeGroupImport: def __init__(self) -> None: self.keyframes: list[LatentKeyframeImport] = [] def add(self, keyframe: LatentKeyframeImport) -> None: added = False # replace existing keyframe if same batch_index for i in range(len(self.keyframes)): if self.keyframes[i].batch_index == keyframe.batch_index: self.keyframes[i] = keyframe added = True break if not added: self.keyframes.append(keyframe) self.keyframes.sort(key=lambda k: k.batch_index) def get_index(self, index: int) -> Union[LatentKeyframeImport, None]: try: return self.keyframes[index] except IndexError: return None def __getitem__(self, index) -> LatentKeyframeImport: return self.keyframes[index] def is_empty(self) -> bool: return len(self.keyframes) == 0 def clone(self) -> 'LatentKeyframeGroupImport': cloned = LatentKeyframeGroupImport() for tk in self.keyframes: cloned.add(tk) return cloned # Path: imports/AdvancedControlNet/control.py class StrengthInterpolationImport: LINEAR = "linear" EASE_IN = "ease-in" EASE_OUT = "ease-out" EASE_IN_OUT = "ease-in-out" NONE = "none" # Path: imports/AdvancedControlNet/logger.py class ColoredFormatter(logging.Formatter): COLORS = { "DEBUG": "\033[0;36m", # CYAN "INFO": "\033[0;32m", # GREEN "WARNING": "\033[0;33m", # YELLOW "ERROR": "\033[0;31m", # RED "CRITICAL": "\033[0;37;41m", # WHITE ON RED "RESET": "\033[0m", # RESET COLOR } def format(self, record): # Path: imports/AdvancedControlNet/latent_keyframe_nodes.py from typing import Union from collections.abc import Iterable from .control import LatentKeyframeImport, LatentKeyframeGroupImport from .control import StrengthInterpolationImport as SI from .logger import logger import numpy as np class LatentKeyframeNodeImport: @classmethod def INPUT_TYPES(s): return { "required": { "batch_index": ("INT", {"default": 0, "min": -1000, "max": 1000, "step": 1}), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), }, "optional": { "prev_latent_kf": ("LATENT_KEYFRAME", ), } } RETURN_NAMES = ("LATENT_KF", ) RETURN_TYPES = ("LATENT_KEYFRAME", ) FUNCTION = "load_keyframe" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes" def load_keyframe(self, batch_index: int, strength: float, prev_latent_kf: LatentKeyframeGroupImport=None, prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name ): prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf if not prev_latent_keyframe: prev_latent_keyframe = LatentKeyframeGroupImport() else: prev_latent_keyframe = prev_latent_keyframe.clone()
keyframe = LatentKeyframeImport(batch_index, strength)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: innovatorved/subtitle # Path: app/models/models.py def download_model(model_name): def download_file(url, filepath): # Path: app/utils/checks.py def check_models_exist(name: str): try: if model_names[name] in os.listdir(os.path.join(os.getcwd(), "models")): print("Model {} exists".format(name)) else: print("Model {} does not exist".format(name)) download_model(key) return True except Exception as exc: print("Error in check_models_exist: {}".format(str(exc))) return False # Path: app/utils/utils.py def generate_vtt_file(path: str = None, model="ggml-tiny.bin"): """./whisper -m models/ggml-tiny.en.bin -f Rev.mp3 out.wav -nt --output-vtt""" try: if path is None or not chack_file_exist(path): raise Exception("PATH Error!") rand = uuid.uuid4() output_audio_path: str = f"data/{rand}.wav" vtt_file_path: str = f"data/{rand}.wav.vtt" command: str = f"./binary/whisper -t {NO_OF_THREADS} -p {NO_OF_PROCESSORS} -m models/{model} -f {path} {output_audio_path} -nt --output-vtt" execute_command(command) return [rand, output_audio_path, vtt_file_path] except Exception as exc: logging.error(exc) raise Exception(exc.__str__()) # Path: app/utils/utils.py def merge_video_and_vtt(video_path, vtt_path, output_path): try: if not chack_file_exist(video_path): raise Exception("Video File Not Found!") if not chack_file_exist(vtt_path): raise Exception("VTT File Not Found!") # Load the input files video = ffmpeg.input(video_path) subtitles = ffmpeg.input(vtt_path) merged = ffmpeg.output( video, subtitles, output_path, vcodec="copy", scodec="mov_text", ) ffmpeg.run(merged, overwrite_output=True) return True except Exception as exc: raise Exception(f"An error occurred: {exc}") # Path: app/core/app.py import logging from app.models import download_model, model_names from app.utils.checks import check_models_exist from app.utils import generate_vtt_file, merge_video_and_vtt # Configure logging logger = logging.getLogger(__name__) def process_video(file, model="base"): """ add_subtitle_in_video @param file: video file path @param model: model name @return: [vtt_file_path , output_file] """ try: if not check_models_exist(model): download_model(model) output_file = f"{file.split('.')[0]}_subtitled.{file.split('.')[1]}"
process_id, output_audio_path, vtt_file_path = generate_vtt_file(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: x0rzavi/github-readme-terminal # Path: gifos/utils/load_config.py def load_toml(file_name: str) -> dict: def __update_config_with_env_vars(config, prefix="GIFOS"): # Path: gifos/utils/schemas/imagebb_image.py class ImgbbImage: """A class to represent an image uploaded to ImgBB. This class represents an image uploaded to ImgBB. Attributes: id: A string that represents the image's ID on ImgBB. url: A string that represents the image's URL on ImgBB. delete_url: A string that represents the URL to delete the image from ImgBB. file_name: A string that represents the name of the image file. expiration: A string that represents the expiration time of the image. size: A string that represents the size of the image. mime: A string that represents the MIME type of the image. extension: A string that represents the extension of the image file. """ __slots__ = [ "id", "url", "delete_url", "file_name", "expiration", "size", "mime", "extension", ] id: str url: str delete_url: str file_name: str expiration: str size: str mime: str extension: str # Path: gifos/utils/upload_imgbb.py from base64 import b64encode from dotenv import load_dotenv from gifos.utils.load_config import gifos_settings from gifos.utils.schemas.imagebb_image import ImgbbImage import os import requests import sys """This module contains a function for uploading an image to ImgBB.""" load_dotenv() IMGBB_API_KEY = os.getenv("IMGBB_API_KEY") ENDPOINT = "https://api.imgbb.com/1/upload" def upload_imgbb(file_name: str, expiration: int = None) -> ImgbbImage: """Upload an image to ImgBB. This function uploads an image to ImgBB using the ImgBB API. The function reads the image file, encodes it in base64, and sends a POST request to the ImgBB API. The function uses the `IMGBB_API_KEY` environment variable for authentication and the `ENDPOINT` constant for the API endpoint. If the `debug` configuration value is True, the function sets the image expiration time to 10 minutes. :param file_name: The name of the image file to upload. :type file_name: str :param expiration: The expiration time for the image in seconds. If the `debug` configuration value is True, this parameter is ignored and the expiration time is set to 10 minutes. The value must be between 60 and 15552000 (6 months) if provided. :type expiration: int, optional :return: An `ImgbbImage` object containing the uploaded image's information if the upload is successful, otherwise None. :rtype: ImgbbImage or None """ if not IMGBB_API_KEY: print("ERROR: Please provide IMGBB_API_KEY") sys.exit(1)
if gifos_settings.get("general", {}).get("debug"):
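The row's helper base64-encodes the image and POSTs it to the ImgBB endpoint; below is a minimal self-contained sketch of that request shape. The endpoint comes from the row; the "key"/"image"/"expiration" form fields are the public ImgBB API's documented parameters and should be treated as assumptions here.

from base64 import b64encode

import requests

def upload_sketch(api_key: str, file_name: str, expiration=None) -> dict:
    with open(file_name, "rb") as f:
        payload = {"key": api_key, "image": b64encode(f.read())}
    if expiration is not None:
        payload["expiration"] = expiration  # 60..15552000 seconds, per the docstring above
    resp = requests.post("https://api.imgbb.com/1/upload", data=payload, timeout=30)
    resp.raise_for_status()
    return resp.json()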
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Zaloog/kanban-python # Path: src/kanban_python/config.py class KanbanConfig: def __init__(self, path=CONFIG_FILE_PATH) -> None: def __repr__(self) -> str: def save(self): def config(self) -> configparser.ConfigParser: def active_board(self) -> str: def active_board(self, new_board): def kanban_boards(self) -> list: def kanban_boards_dict(self) -> dict: def kanban_boards_dict(self, board_name: str) -> dict: def active_board_path(self) -> str: def show_footer(self): def show_footer(self, visible): def col_min_width(self) -> int: def col_min_width(self, new_width: int) -> None: def kanban_columns_dict(self) -> dict: def kanban_columns_dict(self, updated_dict) -> dict: def vis_cols(self) -> list: def done_limit(self) -> int: def done_limit(self, new_limit: int) -> None: def scanned_files(self) -> list: def scanned_files(self, new_files_to_scan: str) -> None: def scanned_patterns(self) -> list: def scanned_patterns(self, new_patterns_to_scan: str) -> None: def create_init_config(conf_path=CONFIG_PATH, data_path=DATA_PATH): def delete_current_folder_board_from_config( cfg=cfg, curr_path: str = str(Path.cwd()) ) -> None: def check_if_board_name_exists_in_config(boardname: str, cfg=cfg) -> bool: def check_if_current_active_board_in_board_list(cfg=cfg) -> bool: def delete_board_from_config(board_name, cfg=cfg) -> None: def check_config_exists(path=CONFIG_FILE_PATH) -> bool: def get_json_path(boardname: str): # Path: src/kanban_python/constants.py BOARD_CAPTION_STRING = "Tasks have the following Structure:\ [[cyan]ID[/]] ([orange3]TAG[/]) [white]Task Title[/] |[red]Days Left[/]|" # Path: src/kanban_python/constants.py COLOR_DICT = { "Ready": "[red]Ready[/]", "Doing": "[yellow]Doing[/]", "Done": "[green]Done[/]", "Deleted": "[deep_pink4]Deleted[/]", "Archived": "[dark_goldenrod]Archived[/]", } # Path: src/kanban_python/constants.py CONFIG_FILE_PATH = CONFIG_PATH / CONFIG_FILE_NAME # Path: src/kanban_python/constants.py FOOTER = [FOOTER_FIRST, FOOTER_LAST] # Path: src/kanban_python/constants.py REPORT_COLORS = ["#161b22", "#0e4429", "#006d32", "#26a641", "#39d353"] # Path: src/kanban_python/utils.py def get_motivational_quote() -> str: def current_time_to_str() -> str: def calculate_time_delta_str(start_time_str: str, end_time_str: str) -> float: def create_status_dict_for_rows(data: dict, vis_cols: list) -> dict: def check_if_done_col_leq_X(cfg, data: dict) -> bool: def check_if_there_are_visible_tasks_in_board(data: dict, vis_cols: list) -> bool: def move_first_done_task_to_archive(data: dict): def delete_json_file(db_path: str) -> None: def check_board_name_valid(boardname: str): def scan_files(path=Path.cwd(), endings: list = [".py"]): def recursive_search(path, file_list: list, progress): def scan_for_todos( file_paths: list, rel_path=Path.cwd(), patterns: list = ["#TODO", "# TODO"] ) -> list: def split_todo_in_tag_and_title(todo: str, patterns: list): def get_tag_id_choices(data_dict: dict, vis_cols: list) -> list: def check_scanner_files_valid(files: str) -> bool: def check_scanner_patterns_valid(patterns: str) -> bool: def get_iso_calender_info(date_str: str): def create_dict_for_report_view(completed_tasks: list): def create_color_mapping(amount_list: list, max_val: int): def create_report_document(boards_dict: dict): def check_due_date_format(date_str: str) -> bool: def due_date_datetime_to_date(date_datetime: str) -> str: def due_date_date_to_datetime(date_str: str) -> str: def calculate_days_left_till_due(due_date: str): # Path: src/kanban_python/interface.py import calendar from datetime import datetime from itertools import zip_longest from rich.prompt import Confirm, IntPrompt, Prompt from rich.table import Table from .config import cfg from .constants import ( BOARD_CAPTION_STRING, COLOR_DICT, CONFIG_FILE_PATH, FOOTER, REPORT_COLORS, ) from .utils import ( calculate_days_left_till_due, calculate_time_delta_str, check_due_date_format, console, create_color_mapping, create_dict_for_report_view, create_status_dict_for_rows, current_time_to_str, due_date_date_to_datetime, due_date_datetime_to_date, ) # Board ##################################################################################### def create_table(data: dict) -> Table: status_dict = create_status_dict_for_rows(data=data, vis_cols=cfg.vis_cols) table_name = cfg.active_board table = Table( title=f"[blue]Active Board: {table_name}[/]", highlight=True, show_header=True, show_footer=True if cfg.show_footer == "True" else False, caption=BOARD_CAPTION_STRING, ) for i, category in enumerate([COLOR_DICT.get(col, col) for col in cfg.vis_cols]): table.add_column( header=category + f"\t({len(status_dict[cfg.vis_cols[i]])} Task/s)", header_style="bold", justify="left", overflow="fold", footer=FOOTER[0] if i == 0 else FOOTER[1] if i == len(cfg.vis_cols) - 1 else "", min_width=cfg.col_min_width, ) for row_tasks in zip_longest(*status_dict.values()): table.add_row(*row_tasks) return table # Board Action selection def input_ask_for_action():
console.print(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: AMAAI-Lab/mustango # Path: audioldm/latent_diffusion/util.py def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): # select alphas for computing the variance schedule alphas = alphacums[ddim_timesteps] alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) # according the the formula provided in https://arxiv.org/abs/2010.02502 sigmas = eta * np.sqrt( (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev) ) if verbose: print( f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}" ) print( f"For the chosen value of eta, which is {eta}, " f"this results in the following sigma_t schedule for ddim sampler {sigmas}" ) return sigmas, alphas, alphas_prev # Path: audioldm/latent_diffusion/util.py def make_ddim_timesteps( ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True ): if ddim_discr_method == "uniform": c = num_ddpm_timesteps // num_ddim_timesteps ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) elif ddim_discr_method == "quad": ddim_timesteps = ( (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2 ).astype(int) else: raise NotImplementedError( f'There is no ddim discretization method called "{ddim_discr_method}"' ) # assert ddim_timesteps.shape[0] == num_ddim_timesteps # add one to get the final alpha values right (the ones from first scale to data during sampling) steps_out = ddim_timesteps + 1 if verbose: print(f"Selected timesteps for ddim sampler: {steps_out}") return steps_out # Path: audioldm/latent_diffusion/util.py def noise_like(shape, device, repeat=False): repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat( shape[0], *((1,) * (len(shape) - 1)) ) noise = lambda: torch.randn(shape, device=device) return repeat_noise() if repeat else noise() # Path: audioldm/latent_diffusion/util.py def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t).contiguous() return out.reshape(b, *((1,) * (len(x_shape) - 1))).contiguous() # Path: audioldm/latent_diffusion/ddim.py import torch import numpy as np from tqdm import tqdm from audioldm.latent_diffusion.util import ( make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor, ) """SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule( self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True ): self.ddim_timesteps = make_ddim_timesteps( ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose, ) alphas_cumprod = self.model.alphas_cumprod assert ( alphas_cumprod.shape[0] == self.ddpm_num_timesteps ), "alphas have to be defined for each timestep" to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer("betas", to_torch(self.model.betas)) 
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer( "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) ) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer( "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), ) # ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
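The predicted line asks make_ddim_sampling_parameters for the DDIM variance schedule, which this row defines in full; the same formula, isolated as a standalone NumPy sketch for reference.

import numpy as np

def ddim_sigmas(alphacums: np.ndarray, ddim_timesteps: np.ndarray, eta: float) -> np.ndarray:
    alphas = alphacums[ddim_timesteps]
    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
    # sigma_t = eta * sqrt((1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)), per arXiv:2010.02502
    return eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))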
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: lxmusics/lx-music-api-server-python # Path: common/Httpx.py def is_valid_utf8(text): def is_plain_text(text): def convert_dict_to_form_string(dic): def log_plaintext(text): def request(url, options = {}): def _json(): def checkcn(): def __init__(self, status, content, headers): def json(self): async def convert_to_requests_response(aiohttp_response): async def AsyncRequest(url, options = {}): class ClientResponse: # Path: common/utils.py def createBase64Encode(data_bytes): def createHexEncode(data_bytes): def createBase64Decode(data): def createHexDecode(data): def handleInflateRawSync(data): def require(module): def addToGlobalNamespace(key, data): def filterFileName(filename): def createMD5(s: str): def readFile(path, mode = "text"): def unique_list(list_in): def encodeURIComponent(component): def decodeURIComponent(component): def encodeURI(uri): def decodeURI(uri): def sortDict(dictionary): def mergeDict(dict1, dict2): def __init__(self, d): def __setattr__(self, key, value): def to_dict(self): def __getattr__(self, UNUSED): def dump_xml(data): def load_xml(data): def sizeFormat(size): def timeLengthFormat(t): def timestamp_format(t): class CreateObject(dict): # Path: common/exceptions.py class FailedException(Exception): # 此错误用于处理代理API请求失败的情况 pass # Path: modules/kg/utils.py def buildRequestParams(dictionary): joined_str = '&'.join([f'{k}={v}' for k, v in dictionary.items()]) return joined_str # Path: modules/kg/search.py from common import Httpx from common import utils from common.exceptions import FailedException from .utils import buildRequestParams # ---------------------------------------- # - mode: python - # - author: helloplhm-qwq - # - name: search.py - # - project: lx-music-api-server - # - license: MIT - # ---------------------------------------- # This file is part of the "lx-music-api-server" project. 
def formatSubResult(l): res = [] for songinfo in l: fileinfo = {} if (songinfo['FileSize'] != 0): fileinfo['128k'] = { 'hash': songinfo['FileHash'], 'size': utils.sizeFormat(songinfo['FileSize']), } if (songinfo['HQFileSize'] != 0): fileinfo['320k'] = { 'hash': songinfo['HQFileHash'], 'size': utils.sizeFormat(songinfo['HQFileSize']), } if (songinfo['SQFileSize'] != 0): fileinfo['flac'] = { 'hash': songinfo['SQFileHash'], 'size': utils.sizeFormat(songinfo['SQFileSize']), } if (songinfo['ResFileSize'] != 0): fileinfo['flac24bit'] = { 'hash': songinfo['ResFileHash'], 'size': utils.sizeFormat(songinfo['ResFileSize']), } res.append({ 'name': songinfo['SongName'], 'name_ori': songinfo['OriSongName'], 'name_extra': songinfo['SongName'].replace(songinfo['OriSongName'], ''), 'singer': songinfo['SingerName'], 'singer_list': [{'name': i['name'], 'id': i['id']} for i in songinfo['Singers']], 'isoriginal': True if (songinfo['IsOriginal'] == 1) else False, 'tag': songinfo.get('TagContent') if songinfo.get('TagContent') else '', 'format_length': utils.timeLengthFormat(songinfo['Duration']), 'length': songinfo['Duration'], 'hash': songinfo['FileHash'], 'file_info': fileinfo, 'songmid': songinfo['Audioid'], 'album_id': songinfo['AlbumID'], 'album': songinfo['AlbumName'], 'language': songinfo['trans_param'].get('language') if songinfo['trans_param'] else '', 'cover': songinfo['Image'].format(size = 1080), 'sizable_cover': songinfo['Image'], 'mvid': songinfo['MvHash'], }) return res async def getSongSearchResult(query, page, size): req = await Httpx.AsyncRequest(utils.encodeURI(f'https://songsearch.kugou.com/song_search_v2?' + buildRequestParams({ "keyword": query, "page": page, "pagesize": size, "userid": 0, "clientver": "", "platform": "WebFilter", "filter": 2, "iscorrection": 1, "privilege_filter": 0 })), { "headers": { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.142.86 Safari/537.36", "Referer": "https://www.kugou.com", } }) body = req.json() if (body['status'] != 1):
raise FailedException('歌曲搜索失败')
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ai-forever/Kandinsky-3 # Path: kandinsky3/model/utils.py def exist(item): return item is not None # Path: kandinsky3/model/utils.py def set_default_layer(condition, layer_1, args_1=[], kwargs_1={}, layer_2=Identity, args_2=[], kwargs_2={}): if condition: return layer_1(*args_1, **kwargs_1) else: return layer_2(*args_2, **kwargs_2) # Path: kandinsky3/model/nn.py import math import torch from torch import nn, einsum from einops import rearrange, repeat from .utils import exist, set_default_layer class Identity(nn.Module): def __init__(self, *args, **kwargs): super().__init__() @staticmethod def forward(x, *args, **kwargs): return x class SinusoidalPosEmb(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=x.device) * -emb) emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j') return torch.cat((emb.sin(), emb.cos()), dim=-1) class ConditionalGroupNorm(nn.Module): def __init__(self, groups, normalized_shape, context_dim): super().__init__() self.norm = nn.GroupNorm(groups, normalized_shape, affine=False) self.context_mlp = nn.Sequential( nn.SiLU(), nn.Linear(context_dim, 2 * normalized_shape) ) self.context_mlp[1].weight.data.zero_() self.context_mlp[1].bias.data.zero_() def forward(self, x, context): context = self.context_mlp(context) ndims = ' 1' * len(x.shape[2:]) context = rearrange(context, f'b c -> b c{ndims}') scale, shift = context.chunk(2, dim=1) x = self.norm(x) * (scale + 1.) + shift return x class Attention(nn.Module): def __init__(self, in_channels, out_channels, context_dim, head_dim=64): super().__init__() assert out_channels % head_dim == 0 self.num_heads = out_channels // head_dim self.scale = head_dim ** -0.5 self.to_query = nn.Linear(in_channels, out_channels, bias=False) self.to_key = nn.Linear(context_dim, out_channels, bias=False) self.to_value = nn.Linear(context_dim, out_channels, bias=False) self.output_layer = nn.Linear(out_channels, out_channels, bias=False) def forward(self, x, context, context_mask=None): query = rearrange(self.to_query(x), 'b n (h d) -> b h n d', h=self.num_heads) key = rearrange(self.to_key(context), 'b n (h d) -> b h n d', h=self.num_heads) value = rearrange(self.to_value(context), 'b n (h d) -> b h n d', h=self.num_heads) attention_matrix = einsum('b h i d, b h j d -> b h i j', query, key) * self.scale
if exist(context_mask):
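The predicted guard handles an optional attention mask; one common continuation (a guess at the pattern, not necessarily this repository's exact code) broadcasts the mask over heads and queries and fills masked positions with a large negative value before softmax.

import torch
from einops import rearrange

def apply_context_mask(attention_matrix: torch.Tensor, context_mask: torch.Tensor) -> torch.Tensor:
    # attention_matrix: (b, h, n, j); context_mask: (b, j) with True = attend.
    neg_value = -torch.finfo(attention_matrix.dtype).max
    mask = rearrange(context_mask.bool(), "b j -> b 1 1 j")
    return attention_matrix.masked_fill(~mask, neg_value)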
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: spfrommer/torchexplorer # Path: torchexplorer/components/tooltip.py class Tooltip: """The tooltip that pops up next to a Module.""" def __init__(self, title: str, keys: list[str], vals: list[str]): self.title = title self.keys = keys self.vals = vals @classmethod def create_io(cls, tracker: SizeTracker) -> 'Tooltip': name = tracker.type.split('.')[-1] keys, vals = ['size'], [str(tracker.size).replace('None', dash)] return Tooltip(name, keys, vals) @classmethod def create_moduleinvocation( cls, module: Module, parent_module: Module, invocation_id: InvocationId ) -> 'Tooltip': name_in_parent = cls._get_name_in_parent(module, parent_module) io_shape_keys, io_shape_vals = cls._get_io_shape_keyvals(module, invocation_id) extra_repr_keys, extra_repr_vals = cls._get_extra_repr_keyvals(module) keys = io_shape_keys + extra_repr_keys vals = io_shape_vals + extra_repr_vals assert len(keys) == len(vals) return Tooltip(name_in_parent, keys, vals) @classmethod def create_attach(cls, module: Module) -> 'Tooltip': return cls.create_io(module.torchexplorer_metadata.input_sizes[0][0]) @classmethod def _get_name_in_parent(cls, module: Module, parent_module: Module) -> str: name_in_parent = '' for name, m in parent_module.named_children(): if m == module: name_in_parent = name break if isinstance(m, ModuleList): for i, mm in enumerate(m): if mm == module: name_in_parent = f'{name}[{i}]' break if isinstance(m, ModuleDict): for k, mm in m.items(): if mm == module: name_in_parent = f'{name}[{k}]' break return name_in_parent @classmethod def _get_io_shape_keyvals( cls, module: Module, invocation_id: InvocationId ) -> tuple[list[str], list[str]]: metadata = module.torchexplorer_metadata keys, vals = [], [] one_input = len(metadata.input_sizes[invocation_id]) == 1 for i, input_tracker in enumerate(metadata.input_sizes[invocation_id]): keys.append('in_size' if one_input else f'in{i}_size') vals.append(str(input_tracker.size).replace('None', dash)) one_output = len(metadata.output_sizes[invocation_id]) == 1 for i, output_tracker in enumerate(metadata.output_sizes[invocation_id]): keys.append('out_size' if one_output else f'out{i}_size') vals.append(str(output_tracker.size).replace('None', dash)) return keys, vals @classmethod def _get_extra_repr_keyvals(cls, module: Module) -> tuple[list[str], list[str]]: try: keys, vals = [], [] extra_rep = module.extra_repr() pairs = re.split(r',\s*(?![^()]*\))(?![^[]]*\])', extra_rep) for pair in pairs: if pair == '': continue k, v = pair.split('=') if ('=' in pair) else (dash, pair) keys.append(k.strip()) vals.append(v.strip()) except Exception: keys, vals = [], [] return keys, vals # Path: torchexplorer/core.py class ModuleInvocationHistograms: """The histograms associated to a particular InvocationId on a module.""" input_hists: list[IncrementalHistogram] = field(default_factory=lambda: []) output_hists: list[IncrementalHistogram] = field(default_factory=lambda: []) # Path: torchexplorer/core.py class ModuleSharedHistograms: """The histograms are shared across all InvocationId on a module.""" param_hists: dict[ParamName, IncrementalHistogram] = dict_field() param_grad_hists: dict[ParamName, IncrementalHistogram] = dict_field() # Path: torchexplorer/render/structs.py from typing import Optional from dataclasses import dataclass, field from torchexplorer.components.tooltip import Tooltip from torchexplorer.core import ( ModuleInvocationHistograms, ModuleSharedHistograms ) from __future__ import annotations @dataclass class EdgeLayout: path_points: list[list[float]] arrowhead_points: list[list[float]] downstream_input_index: Optional[int] upstream_output_index: Optional[int] @dataclass class TooltipLayout: tooltip: Tooltip # Coordinates in parent of the layout this tooltip belongs to bottom_left_corner: list[float] = field(default_factory=lambda: [0, 0]) top_right_corner: list[float] = field(default_factory=lambda: [0, 0]) # Either a specific module invocation or for IO @dataclass class NodeLayout: display_name: Optional[str] = None tooltip: Optional[TooltipLayout] = None invocation_hists: Optional[ModuleInvocationHistograms] = None invocation_grad_hists: Optional[ModuleInvocationHistograms] = None
shared_hists: Optional[ModuleSharedHistograms] = None
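TooltipLayout in this row uses field(default_factory=...) for its list-valued defaults; a tiny self-contained reminder of why dataclasses require that for mutable defaults.

from dataclasses import dataclass, field

@dataclass
class LayoutSketch:
    # A bare `corner: list = [0, 0]` raises ValueError at class-definition time;
    # default_factory builds a fresh list per instance, as in TooltipLayout above.
    corner: list = field(default_factory=lambda: [0, 0])

a, b = LayoutSketch(), LayoutSketch()
a.corner.append(1)
assert b.corner == [0, 0]  # instances do not share the default list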
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: namin/llm-verified-with-monte-carlo-tree-search # Path: lang_config.py STOP_WORD = "\n" # Path: model_config.py BASE_MODEL_NAME = args.base_model_name # Path: model_config.py PEFT_MODEL_PATH = args.peft_model_path # Path: model_config.py PPO_MODEL_PATH = args.ppo_model_path # Path: model_config.py CUSTOM_STOP = args.custom_stop # Path: model_config.py SAME_FOR_MANY_SAMPLES = args.same_for_many_samples # Path: model_config.py BEAM_SEARCH = args.beam_search # Path: model_config.py MODEL_ARG_TOP_K = args.model_arg_topk # Path: model_config.py MODEL_ARG_TOP_P = args.model_arg_topp # Path: model_config.py MODEL_ARG_TEMP = args.model_arg_temp # Path: huggingface_generate.py import torch from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer from trl import AutoModelForCausalLMWithValueHead from peft import PeftModel from lang_config import STOP_WORD from model_config import BASE_MODEL_NAME, PEFT_MODEL_PATH, PPO_MODEL_PATH, CUSTOM_STOP, SAME_FOR_MANY_SAMPLES, BEAM_SEARCH, MODEL_ARG_TOP_K, MODEL_ARG_TOP_P, MODEL_ARG_TEMP from typing import List def load_model( base_model_name: str = BASE_MODEL_NAME, ppo_model_path: str = PPO_MODEL_PATH, peft_model_path: str = PEFT_MODEL_PATH, ) -> (AutoModelForCausalLM, PeftModel, AutoTokenizer): bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) if ppo_model_path is None: base_model = AutoModelForCausalLM.from_pretrained( base_model_name, quantization_config=bnb_config, device_map="auto", trust_remote_code=True, use_auth_token=True, ) tokenizer = AutoTokenizer.from_pretrained( base_model_name, trust_remote_code=True ) else: base_model = AutoModelForCausalLMWithValueHead.from_pretrained( ppo_model_path, quantization_config=bnb_config ) tokenizer = AutoTokenizer.from_pretrained(ppo_model_path) tokenizer.pad_token = tokenizer.eos_token model = ( PeftModel.from_pretrained(base_model, peft_model_path) if peft_model_path else base_model ) return (base_model, model, tokenizer) def stop_words_ids(tokenizer: AutoTokenizer) -> List[int]: # Hack: we want the stop word as it is encoded glued to another word. stop_word_id = tokenizer.encode("hello" + STOP_WORD, add_special_tokens=False)[-1] quote_word_id = tokenizer.encode("```", add_special_tokens=False)[-1] return [stop_word_id, quote_word_id] def get_model_generation_token_args( tokenizer: AutoTokenizer, custom_stop: bool = CUSTOM_STOP ): return dict( min_length=5, max_new_tokens=100, eos_token_id=stop_words_ids(tokenizer) if custom_stop else tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, ) def get_model_generation_search_args( num: int, beam_search: bool = BEAM_SEARCH ): if beam_search: return dict( num_beams=num, num_beam_groups=num, diversity_penalty=0.9, ) else: return dict( top_k=MODEL_ARG_TOP_K if MODEL_ARG_TOP_K is not None else 50 if num>1 and not SAME_FOR_MANY_SAMPLES else 7,
top_p=MODEL_ARG_TOP_P if MODEL_ARG_TOP_P is not None else 0.9,
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: BraveGroup/Drive-WM # Path: src/diffusers/dependency_versions_check.py def dep_version_check(pkg, hint=None): require_version(deps[pkg], hint) # Path: src/diffusers/utils/import_utils.py ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} # Path: src/diffusers/utils/import_utils.py def is_peft_available(): return _peft_available # Path: src/diffusers/utils/import_utils.py def is_transformers_available(): return _transformers_available # Path: src/diffusers/utils/constants.py import importlib import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home from packaging import version from ..dependency_versions_check import dep_version_check from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. default_cache_path = HUGGINGFACE_HUB_CACHE MIN_PEFT_VERSION = "0.6.0" MIN_TRANSFORMERS_VERSION = "4.34.0" _CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES CONFIG_NAME = "config.json" WEIGHTS_NAME = "diffusion_pytorch_model.bin" FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack" ONNX_WEIGHTS_NAME = "model.onnx" SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors" ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb" HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") DIFFUSERS_CACHE = default_cache_path DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules" HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"] # Below should be `True` if the current version of `peft` and `transformers` are compatible with # PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are # available. # For PEFT it is has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0. _required_peft_version = is_peft_available() and version.parse( version.parse(importlib.metadata.version("peft")).base_version ) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
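Both version gates in this row follow the same base_version comparison; restated below as a generic helper. The combined flag name at the end is illustrative, not the library's own constant.

import importlib.metadata

from packaging import version

def meets_minimum(package: str, minimum: str) -> bool:
    try:
        installed = version.parse(importlib.metadata.version(package)).base_version
    except importlib.metadata.PackageNotFoundError:
        return False
    return version.parse(installed) >= version.parse(minimum)

peft_backend_ok = meets_minimum("peft", "0.6.0") and meets_minimum("transformers", "4.34.0")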
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: basnijholt/unidep # Path: unidep/_dependencies_parsing.py def find_requirements_files( base_dir: str | Path = ".", depth: int = 1, *, verbose: bool = False, ) -> list[Path]: """Scan a directory for `requirements.yaml` and `pyproject.toml` files.""" base_path = Path(base_dir) found_files = [] # Define a helper function to recursively scan directories def _scan_dir(path: Path, current_depth: int) -> None: if verbose: print(f"🔍 Scanning in `{path}` at depth {current_depth}") if current_depth > depth: return for child in path.iterdir(): if child.is_dir(): _scan_dir(child, current_depth + 1) elif child.name == "requirements.yaml": found_files.append(child) if verbose: print(f'🔍 Found `"requirements.yaml"` at `{child}`') elif child.name == "pyproject.toml" and unidep_configured_in_toml(child): if verbose: print(f'🔍 Found `"pyproject.toml"` with dependencies at `{child}`') found_files.append(child) _scan_dir(base_path, 0) return sorted(found_files) # Path: unidep/_dependencies_parsing.py def parse_local_dependencies( *paths: Path, check_pip_installable: bool = True, verbose: bool = False, ) -> dict[Path, list[Path]]: """Extract local project dependencies from a list of `requirements.yaml` or `pyproject.toml` files. Works by loading the specified `local_dependencies` list. """ # noqa: E501 dependencies: dict[str, set[str]] = defaultdict(set) for p in paths: if verbose: print(f"🔗 Analyzing dependencies in `{p}`") base_path = p.resolve().parent _extract_local_dependencies( path=p, base_path=base_path, processed=set(), dependencies=dependencies, check_pip_installable=check_pip_installable, verbose=verbose, ) return { Path(k): sorted({Path(v) for v in v_set}) for k, v_set in sorted(dependencies.items()) } # Path: unidep/_pytest_plugin.py import os import sys import pytest from pathlib import Path from typing import TYPE_CHECKING from unidep._dependencies_parsing import ( find_requirements_files, parse_local_dependencies, ) from git import Repo """unidep - Unified Conda and Pip requirements management. Pytest plugin for running only tests of changed files. WARNING: Still experimental and not documented. """ from __future__ import annotations if TYPE_CHECKING: def pytest_addoption(parser: pytest.Parser) -> None: # pragma: no cover """Add options to the pytest command line.""" parser.addoption( "--run-affected", action="store_true", default=False, help="Run only tests from affected packages", ) parser.addoption( "--branch", action="store", default="origin/main", help="Branch to compare with for finding affected tests", ) parser.addoption( "--repo-root", action="store", default=".", type=Path, help="Root of the repository", ) def pytest_collection_modifyitems( config: pytest.Config, items: list[pytest.Item], ) -> None: # pragma: no cover """Filter tests based on the --run-affected option.""" if not config.getoption("--run-affected"): return try: except ImportError: print( "🛑 You need to install `gitpython` to use the `--run-affected` option." "run `pip install gitpython` to install it.", ) sys.exit(1) compare_branch = config.getoption("--branch") repo_root = Path(config.getoption("--repo-root")).absolute() repo = Repo(repo_root)
found_files = find_requirements_files(repo_root)
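A guess at the plugin's next step, which this row does not show: list the files changed relative to the comparison branch with GitPython and resolve them against the repo root, so they can be intersected with each package's local dependencies.

from pathlib import Path

from git import Repo

def changed_paths(repo_root: Path, compare_branch: str = "origin/main") -> set:
    repo = Repo(repo_root)
    # `git diff --name-only <branch>` via GitPython's dynamic command builder.
    names = repo.git.diff(compare_branch, name_only=True).splitlines()
    return {(repo_root / name).resolve() for name in names if name}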
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: BAAI-DCAI/SegVol # Path: segment_anything_volumetric/modeling/common.py class LayerNorm2d(nn.Module): def __init__(self, num_channels: int, eps: float = 1e-6) -> None: super().__init__() self.weight = nn.Parameter(torch.ones(num_channels)) self.bias = nn.Parameter(torch.zeros(num_channels)) self.eps = eps def forward(self, x: torch.Tensor) -> torch.Tensor: u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x # Path: segment_anything_volumetric/modeling/common.py class MLPBlock(nn.Module): def __init__( self, embedding_dim: int, mlp_dim: int, act: Type[nn.Module] = nn.GELU, ) -> None: super().__init__() self.lin1 = nn.Linear(embedding_dim, mlp_dim) self.lin2 = nn.Linear(mlp_dim, embedding_dim) self.act = act() def forward(self, x: torch.Tensor) -> torch.Tensor: return self.lin2(self.act(self.lin1(x))) # Path: segment_anything_volumetric/modeling/image_encoder.py import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Type from .common import LayerNorm2d, MLPBlock from monai.networks.blocks import PatchEmbed # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa class ImageEncoderViT(nn.Module): def __init__( self, img_size: int = 1024, patch_size: int = 16, in_chans: int = 1, embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4.0, out_chans: int = 256, qkv_bias: bool = True, norm_layer: Type[nn.Module] = nn.LayerNorm, act_layer: Type[nn.Module] = nn.GELU, use_abs_pos: bool = True, use_rel_pos: bool = False, rel_pos_zero_init: bool = True, window_size: int = 0, global_attn_indexes: Tuple[int, ...] = (), ) -> None: """ Args: img_size (int): Input image size. patch_size (int): Patch size. in_chans (int): Number of input image channels. embed_dim (int): Patch embedding dimension. depth (int): Depth of ViT. num_heads (int): Number of attention heads in each ViT block. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool): If True, add a learnable bias to query, key, value. norm_layer (nn.Module): Normalization layer. act_layer (nn.Module): Activation layer. use_abs_pos (bool): If True, use absolute positional embeddings. use_rel_pos (bool): If True, add relative positional embeddings to the attention map. rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. window_size (int): Window size for window attention blocks. global_attn_indexes (list): Indexes for blocks using global attention. 
""" super().__init__() self.img_size = img_size # self.patch_embed = PatchEmbed( # kernel_size=(patch_size, patch_size), # stride=(patch_size, patch_size), # in_chans=in_chans, # embed_dim=embed_dim, # ) self.patch_embed = PatchEmbed( patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, spatial_dims=3, ) self.pos_embed: Optional[nn.Parameter] = None if use_abs_pos: # Initialize absolute positional embedding with pretrain image size. self.pos_embed = nn.Parameter( torch.zeros(1, img_size // patch_size, img_size // patch_size, img_size // patch_size, embed_dim) ) self.blocks = nn.ModuleList() for i in range(depth): block = Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, norm_layer=norm_layer, act_layer=act_layer, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, window_size=window_size if i not in global_attn_indexes else 0, input_size=(img_size // patch_size, img_size // patch_size), ) self.blocks.append(block) self.neck = nn.Sequential( nn.Conv2d( embed_dim, out_chans, kernel_size=1, bias=False, ),
LayerNorm2d(out_chans),
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: xk-huang/segment-caption-anything # Path: scripts/tools/utils/git_utils/common.py def qd_tqdm(*args, **kwargs): desc = kwargs.get("desc", "") import inspect frame = inspect.currentframe() frames = inspect.getouterframes(frame) frame = frames[1].frame line_number = frame.f_lineno fname = op.basename(frame.f_code.co_filename) message = "{}:{}".format(fname, line_number) if "desc" in kwargs: kwargs["desc"] = message + " " + desc else: kwargs["desc"] = message if "mininterval" not in kwargs: # every 2 secons; default is 0.1 second which is too frequent kwargs["mininterval"] = 2 return tqdm(*args, **kwargs) # Path: scripts/tools/utils/git_utils/common.py def dict_update_path_value(d, p, v): ps = p.split("$") while True: if len(ps) == 1: d[ps[0]] = v break else: if ps[0] not in d: d[ps[0]] = {} d = d[ps[0]] ps = ps[1:] # Path: scripts/tools/utils/git_utils/common.py def dict_get_path_value(d, p, with_type=False): ps = p.split("$") cur_dict = d while True: if len(ps) > 0: k = dict_parse_key(ps[0], with_type) if isinstance(cur_dict, (tuple, list)): cur_dict = cur_dict[int(k)] else: cur_dict = cur_dict[k] ps = ps[1:] else: return cur_dict # Path: scripts/tools/utils/git_utils/common.py def get_all_path(d, with_type=False, leaf_only=True, with_list=True): assert not with_type, "will not support" all_path = [] if isinstance(d, dict): for k, v in d.items(): all_sub_path = get_all_path( v, with_type, leaf_only=leaf_only, with_list=with_list ) all_path.extend([k + "$" + p for p in all_sub_path]) if not leaf_only or len(all_sub_path) == 0: all_path.append(k) elif (isinstance(d, tuple) or isinstance(d, list)) and with_list: for i, _v in enumerate(d): all_sub_path = get_all_path( _v, with_type, leaf_only=leaf_only, with_list=with_list, ) all_path.extend(["{}$".format(i) + p for p in all_sub_path]) if not leaf_only or len(all_sub_path) == 0: all_path.append("{}".format(i)) return all_path # Path: scripts/tools/utils/git_utils/common.py def load_from_yaml_str(s): return yaml.load(s, Loader=yaml.UnsafeLoader) # Path: scripts/tools/utils/git_utils/tsv_io.py import numpy as np import shutil import mmap import time import logging import types import os import os.path as op import subprocess import tempfile import hashlib import logging import struct from .common import qd_tqdm as tqdm from .common import ( dict_update_path_value, dict_get_path_value, get_all_path, load_from_yaml_str, ) from azfuse import File from contextlib import contextmanager from datasets.utils.filelock import FileLock from urllib.parse import urlparse, urlunparse from pathos.multiprocessing import ProcessingPool as Pool # NOTE(xiaoke): Modified. Try to use azfuse.File if possible. 
try: except ImportError: File = types.SimpleNamespace() File.open = open File.get_file_size = lambda x: os.stat(x).st_size logger = logging.getLogger(__name__) def concat_files(ins, out): File.prepare(ins) with File.open(out, "wb") as fp_out: for i, f in enumerate(ins): logging.info("concating {}/{} - {}".format(i, len(ins), f)) with File.open(f, "rb") as fp_in: shutil.copyfileobj(fp_in, fp_out, 1024 * 1024 * 10) def concat_tsv_files(tsvs, out_tsv): if len(tsvs) == 1 and tsvs[0] == out_tsv: return File.prepare(tsvs) concat_files(tsvs, out_tsv) sizes = [File.get_file_size(t) for t in tsvs] sizes = np.cumsum(sizes) sizes = [0] + sizes[:-1].tolist() concate_lineidx_8b(sizes, tsvs, out_tsv) def get_tmp_folder(): folder = os.environ.get("GIT_TMP_FOLDER", "/tmp") return folder def parallel_map(func, all_task, num_worker=16): if num_worker > 0: with Pool(num_worker) as m: result = m.map(func, all_task) return result else: result = [] for t in all_task: result.append(func(t)) return result def ensure_remove_file(d): if op.isfile(d) or op.islink(d): try: os.remove(d) except: pass def concate_lineidx_8b(sizes, tsvs, out_tsv): File.prepare(tsvs) folder = get_tmp_folder() def row_processor_8b(row): offset, in_tsv, out_tsv = row
fbar = tqdm(unit_scale=True)
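concat_tsv_files in this row converts per-file byte sizes into start offsets for the concatenated index via a cumulative sum; the trick isolated as a sketch.

import numpy as np

def start_offsets(file_sizes: list) -> list:
    # Each file starts where the cumulative size of its predecessors ends.
    cum = np.cumsum(file_sizes)
    return [0] + cum[:-1].tolist()

assert start_offsets([10, 20, 5]) == [0, 10, 30]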
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: fjzzq2002/is-my-problem-new # Path: src/utils.py def read_problems(filename): # read as a json with open(filename) as f: problems = json.load(f) return [x for x in problems if len(x["statement"].strip()) >= 5] # Path: src/utils.py def dump_json_safe(obj, filename): import tempfile with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: json.dump(obj, f) shutil.move(f.name, filename) # Path: src/utils.py def get_text(tag: bs4.Tag) -> str: _inline_elements = { "a", "span", "em", "strong", "u", "i", "font", "mark", "label", "s", "sub", "sup", "tt", "bdo", "button", "cite", "del", "b", "a", "font", } def _get_text(tag: bs4.Tag) -> typing.Generator: for child in tag.children: if isinstance(child, bs4.Tag): # if the tag is a block type tag then yield new lines before after is_block_element = child.name not in _inline_elements if is_block_element: yield "\n" yield from ["\n"] if child.name == "br" else _get_text(child) if is_block_element: yield "\n" elif isinstance(child, bs4.NavigableString): yield child.string return "".join(_get_text(tag)) # Path: src/scrapper/codeforces.py from ..utils import read_problems, dump_json_safe, get_text from bs4 import BeautifulSoup from tqdm.auto import tqdm import json import os import requests import time import random scrapped_problems = [] try: scrapped_problems = read_problems("problems/codeforces.json") print(f"Recalled {len(scrapped_problems)} scrapped problems") except: print("Cannot find scrapped problems") scrapped_uids = set(p["uid"] for p in scrapped_problems) codeforces_endpoint = "https://codeforces.com/api/problemset.problems" # get list of problems list_problems = requests.get(codeforces_endpoint).json()["result"]["problems"] # the website is down, read problems.txt instead # with open('problems.txt') as f: # list_problems = json.load(f)['result']['problems'] print("# problems:", len(list_problems)) # a scrapper for codeforces def scrap_problem(contestId, index, rating, tags, uid): url = f"https://codeforces.com/contest/{contestId}/problem/{index}" response = requests.get(url, timeout=30) soup = BeautifulSoup(response.content, "html.parser") statement = soup.find(class_="problem-statement") try: statement.find(class_="header").decompose() except: pass statement_body = statement.find("div") statement_body = get_text(statement_body) # \r -> \n, remove duplicate \n, strip statement_body = ( statement_body.replace("\r", "\n") .replace("\n\n", "\n") .replace("$$$", "$") .strip() ) problem = { "uid": uid, "url": url, "tags": tags, # 'raw': str(response.content), "statement": statement_body, "contestId": contestId, "index": index, "rating": rating, } return problem for problem in tqdm(list_problems): contestId, index, rating, tags = ( problem["contestId"], problem["index"], problem.get("rating", -1), problem["tags"], ) uid = f"Codeforces{contestId}{index}" if uid in scrapped_uids: continue print(f"Scrapping {uid}") result = None try: result = scrap_problem(contestId, index, rating, tags, uid) except Exception as e: print("Error while scrapping:", e) if result is not None: scrapped_problems.append(result) time.sleep(0.1) # save to file every 10 problems if random.random() < 0.1:
dump_json_safe(scrapped_problems, "problems/codeforces.json")
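dump_json_safe in this row is an atomic-write pattern worth noting: the scraper's periodic saves cannot leave a truncated problems file. The same pattern, restated standalone.

import json
import shutil
import tempfile

def dump_json_atomic(obj, filename: str) -> None:
    # Write to a temporary file, then rename into place: a crash mid-dump
    # leaves the previous file intact rather than a half-written JSON.
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
        json.dump(obj, f)
    shutil.move(f.name, filename)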
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: p0p4k/pflowtts_pytorch # Path: pflow/data/text_mel_datamodule.py class TextMelDataModule(LightningDataModule): def __init__( # pylint: disable=unused-argument self, name, train_filelist_path, valid_filelist_path, batch_size, num_workers, pin_memory, cleaners, add_blank, n_spks, n_fft, n_feats, sample_rate, hop_length, win_length, f_min, f_max, data_statistics, seed, ): super().__init__() # this line allows to access init params with 'self.hparams' attribute # also ensures init params will be stored in ckpt self.save_hyperparameters(logger=False) def setup(self, stage: Optional[str] = None): # pylint: disable=unused-argument """Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`. This method is called by lightning with both `trainer.fit()` and `trainer.test()`, so be careful not to execute things like random split twice! """ # load and split datasets only if not loaded already self.trainset = TextMelDataset( # pylint: disable=attribute-defined-outside-init self.hparams.train_filelist_path, self.hparams.n_spks, self.hparams.cleaners, self.hparams.add_blank, self.hparams.n_fft, self.hparams.n_feats, self.hparams.sample_rate, self.hparams.hop_length, self.hparams.win_length, self.hparams.f_min, self.hparams.f_max, self.hparams.data_statistics, self.hparams.seed, ) self.validset = TextMelDataset( # pylint: disable=attribute-defined-outside-init self.hparams.valid_filelist_path, self.hparams.n_spks, self.hparams.cleaners, self.hparams.add_blank, self.hparams.n_fft, self.hparams.n_feats, self.hparams.sample_rate, self.hparams.hop_length, self.hparams.win_length, self.hparams.f_min, self.hparams.f_max, self.hparams.data_statistics, self.hparams.seed, ) def train_dataloader(self): return DataLoader( dataset=self.trainset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, pin_memory=self.hparams.pin_memory, shuffle=True, collate_fn=TextMelBatchCollate(self.hparams.n_spks), ) def val_dataloader(self): return DataLoader( dataset=self.validset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, pin_memory=self.hparams.pin_memory, shuffle=False, collate_fn=TextMelBatchCollate(self.hparams.n_spks), ) def teardown(self, stage: Optional[str] = None): """Clean up after fit or test.""" pass # pylint: disable=unnecessary-pass def state_dict(self): # pylint: disable=no-self-use """Extra things to save to checkpoint.""" return {} def load_state_dict(self, state_dict: Dict[str, Any]): """Things to do when loading checkpoint.""" pass # pylint: disable=unnecessary-pass # Path: pflow/utils/logging_utils.py def log_hyperparameters(object_dict: Dict[str, Any]) -> None: # Path: pflow/utils/generate_data_statistics.py import os import sys import argparse import json import sys import rootutils import torch from pathlib import Path from hydra import compose, initialize from omegaconf import open_dict from tqdm.auto import tqdm from pflow.data.text_mel_datamodule import TextMelDataModule from pflow.utils.logging_utils import pylogger r""" The file creates a pickle file where the values needed for loading of dataset is stored and the model can load it when needed. 
Parameters from hparam.py will be used """ sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
log = pylogger.get_pylogger(__name__)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: theroyallab/tabbyAPI # Path: args.py def convert_args_to_dict(args: argparse.Namespace, parser: argparse.ArgumentParser): """Broad conversion of surface level arg groups to dictionaries""" arg_groups = {} for group in parser._action_groups: group_dict = {} for arg in group._group_actions: value = getattr(args, arg.dest, None) if value is not None: group_dict[arg.dest] = value arg_groups[group.title] = group_dict return arg_groups # Path: args.py def init_argparser(): """Creates an argument parser that any function can use""" parser = argparse.ArgumentParser( epilog="These args are only for a subset of the config. " + "Please edit config.yml for all options!" ) add_network_args(parser) add_model_args(parser) add_logging_args(parser) add_config_args(parser) return parser # Path: start.py import argparse import os import pathlib import subprocess from args import convert_args_to_dict, init_argparser from main import entrypoint """Utility to automatically upgrade and start the API""" def get_requirements_file(): """Fetches the appropriate requirements file depending on the GPU""" requirements_name = "requirements-nowheel" ROCM_PATH = os.environ.get("ROCM_PATH") CUDA_PATH = os.environ.get("CUDA_PATH") # TODO: Check if the user has an AMD gpu on windows if ROCM_PATH: requirements_name = "requirements-amd" # Also override env vars for ROCm support on non-supported GPUs os.environ["ROCM_PATH"] = "/opt/rocm" os.environ["HSA_OVERRIDE_GFX_VERSION"] = "10.3.0" os.environ["HCC_AMDGPU_TARGET"] = "gfx1030" elif CUDA_PATH: cuda_version = pathlib.Path(CUDA_PATH).name if "12" in cuda_version: requirements_name = "requirements" elif "11" in cuda_version: requirements_name = "requirements-cu118" return requirements_name def add_start_args(parser: argparse.ArgumentParser): """Add start script args to the provided parser""" start_group = parser.add_argument_group("start") start_group.add_argument( "-iu", "--ignore-upgrade", action="store_true", help="Ignore requirements upgrade", ) start_group.add_argument( "-nw", "--nowheel", action="store_true", help="Don't upgrade wheel dependencies (exllamav2, torch)", ) if __name__ == "__main__": subprocess.run(["pip", "-V"]) # Create an argparser and add extra startup script args parser = init_argparser() add_start_args(parser) args = parser.parse_args() if args.ignore_upgrade: print("Ignoring pip dependency upgrade due to user request.") else: requirements_file = ( "requirements-nowheel" if args.nowheel else get_requirements_file() ) subprocess.run(["pip", "install", "-U", "-r", f"{requirements_file}.txt"]) # Import entrypoint after installing all requirements
entrypoint(convert_args_to_dict(args, parser))
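A small variation on this row's upgrade step (an assumption, not the project's code): invoking pip through the running interpreter pins the install to the active environment instead of whichever pip is first on PATH.

import subprocess
import sys

subprocess.run(
    [sys.executable, "-m", "pip", "install", "-U", "-r", "requirements.txt"],
    check=True,
)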
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: zorazrw/filco # Path: cxmi.py def calc_cxmi_score( model: AutoModelForSeq2SeqLM, tokenizer: AutoTokenizer, answer: str, base_input: str, ctx_input: str, apply_sigmoid: bool = False, ) -> float: """Compute the CXMI score.""" base_probs = get_output_probs(model, tokenizer, base_input, answer) ctx_probs = get_output_probs(model, tokenizer, ctx_input, answer) diff = sent_wise_diff(base_scores=base_probs, ctx_scores=ctx_probs) if apply_sigmoid: diff = sigmoid(diff) return diff # Path: cxmi.py def get_example_inputs( question: str, context: str, answers: list[str], question_prefix: str = "question", context_prefix: str = "context", ) -> dict: """Get example inputs for the generation model.""" base_input = get_input_text( question, context=None, question_prefix=question_prefix, context_prefix=context_prefix, ) ctx_input = get_input_text( question, context=context, question_prefix=question_prefix, context_prefix=context_prefix, ) return { "base_input": base_input, "ctx_input": ctx_input, "answers": answers, } # Path: eval.py def calc_unigram_f1(text: str, answers: list[str], field: str = "f1") -> float: """Calculate unigram f1 score between the text and reference answers.""" norm_pred = normalize_text(text) norm_answers = [normalize_text(ans) for ans in answers] common_tokens = [ Counter(norm_pred) & Counter(norm_ans) for norm_ans in norm_answers ] num_same = [sum(common.values()) for common in common_tokens] score_list = [] for i, num in enumerate(num_same): if num == 0: score_list.append(0.0) else: p = 1.0 * num / len(norm_pred) r = 1.0 * num / len(norm_answers[i]) f1 = 2 * p * r / (p + r) if field == "precision": score_list.append(p) elif field == "recall": score_list.append(r) elif field == "f1": score_list.append(f1) else: raise ValueError(f"Unknown field: {field}") return max(score_list) # Path: eval.py def has_answer(text: str, answers: list[str]) -> float: """Check if text contains any of the answers.""" return float(any([(ans.lower() in text.lower()) for ans in answers])) # Path: utils.py def load_dataset(path: str) -> list[dict]: """Load dataset from JSON or JSONL file.""" if path.endswith(".json"): return json.load(open(path, "r")) elif path.endswith(".jsonl"): return [json.loads(line.strip()) for line in open(path, "r")] else: extension = path.split(".")[-1] raise ValueError(f"File extension [{extension}] not valid.") # Path: utils.py def write_dataset(path: str, dataset: list[dict]): """Write dataset to JSON or JSONL file.""" if path.endswith(".json"): json.dump(dataset, open(path, "w")) elif path.endswith(".jsonl"): with open(path, "w") as fw: for res_dict in dataset: fw.write(json.dumps(res_dict) + "\n") else: extension = path.split(".")[-1] raise ValueError(f"File extension [{extension}] not valid.") # Path: measure_ctxs.py import argparse import torch from nltk.tokenize import sent_tokenize from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from cxmi import calc_cxmi_score, get_example_inputs from eval import calc_unigram_f1, has_answer from utils import load_dataset, write_dataset """Calculate Scores of Individual Sentences in Retrieved Passages.""" def calc_cxmi( text: str, question: str, answers: list[str], tokenizer: AutoTokenizer, model: AutoModelForSeq2SeqLM, ) -> float: """Calculate CXMI score for a context text.""" proc_inputs = get_example_inputs( question=args.prefix + question, context=text, answers=answers, ) cxmi_score = calc_cxmi_score( model=model, tokenizer=tokenizer, answer=proc_inputs["answers"][0], base_input=proc_inputs["base_input"], ctx_input=proc_inputs["ctx_input"], apply_sigmoid=True, ) return cxmi_score def main(): """Run the main context measuring function.""" # load dataset
dataset = load_dataset(args.dataset_path)
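One reading of the calc_cxmi_score contract shown in this row, with the sign convention guessed (sent_wise_diff's direction is not visible here); the sigmoid squashes the score into (0, 1).

import math

def cxmi_sketch(base_logprob: float, ctx_logprob: float, apply_sigmoid: bool = True) -> float:
    diff = ctx_logprob - base_logprob  # assumed sign: gain from conditioning on the context
    return 1.0 / (1.0 + math.exp(-diff)) if apply_sigmoid else diff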
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ShipBit/wingman-ai # Path: gui/sections/context_switcher.py class ContextSwitcher(ctk.CTkFrame): # class ContextSwitcher(ctk.CTkScrollableFrame): def __init__(self, master, **kwargs): super().__init__(master, **kwargs) self.grid_columnconfigure(0, weight=1) self.master = master self.contexts = master.core.config_manager.contexts self.context_buttons = {} self.active_context = "" self.spacer = ctk.CTkLabel(self, text="") self.spacer.grid(row=0, column=0) for i, context_name in enumerate(self.contexts): context_button = IconButton(self, icon=f"context-icon_{CONTEXT_COLORS[i % len(CONTEXT_COLORS)]}" if context_name else "context-icon", themed=False, command=lambda c=context_name: self.activate_context(c)) context_button.grid(row=i+1, column=0, padx=14, pady=14) self.context_buttons[context_name] = context_button if not context_name: self.__set_context_button_state("", False) def __set_context_button_state(self, context, active=True): context_button = self.context_buttons.get(context) if context_button: context_button.configure(state="normal" if active else "disabled", fg_color="transparent" if active else ("grey60", "grey40")) def activate_context(self, context): self.__set_context_button_state(self.active_context, True) if self.master: update_context = getattr(self.master, "update_context", None) if callable(update_context): update_context(context) self.active_context = context self.__set_context_button_state(self.active_context, False) # Path: gui/sections/context_runner.py class ContextRunner(ctk.CTkFrame): def __init__(self, master, context="", **kwargs): super().__init__(master, **kwargs) self.core = master.core self.core.load_context(context) self.status_var = ctk.StringVar(self, "Inactive", "status") tower = self.core.tower auto_run = self.core.config_manager.gui_config.get("auto-run", "off") == "on" self.grid_columnconfigure(0, weight=1) self.grid_rowconfigure(3, weight=1) context_title = ( context.title().replace("_", " ").strip() if context else "Default" ) self.title = ctk.CTkLabel( self, text=context_title, font=("TkHeadingFont", 20, "bold"), text_color="#EB154D", ) self.title.grid(row=0, column=0, padx=20, pady=10, sticky="w") # TODO: Make this a component self.status = ctk.CTkLabel( self, textvariable=self.status_var, anchor="w", fg_color=("grey70", "grey30"), corner_radius=10, width=65, pady=3, ) self.status.grid(row=0, column=0, padx=20, pady=10, sticky="e") self.status_icon_active = Icon("state_active", 16, False) self.status_icon_inactive = Icon("state_inactive", 16, False) self.status_led = ctk.CTkLabel( self, image=self.status_icon_inactive, text="", fg_color="transparent" ) self.status_led.grid(row=0, column=0, padx=95, pady=10, sticky="e") wingmen = [] if tower: wingmen = tower.get_wingmen() self.wingmen_list = WingmenList(self, wingmen=wingmen) self.wingmen_list.grid(row=1, column=0, padx=20, pady=10, sticky="ew") broken_wingmen = [] if tower: broken_wingmen = tower.get_broken_wingmen() if len(broken_wingmen) > 0: self.broken_wingmen_list = WingmenList( self, wingmen=broken_wingmen, broken=True ) self.broken_wingmen_list.grid( row=2, column=0, padx=20, pady=10, sticky="ew" ) self.terminal = ctk.CTkTextbox(self) self.terminal.grid(row=3, column=0, padx=20, pady=10, sticky="nesw") self.terminal.configure(state="disabled", wrap="word") printr.set_output("main", self.terminal) if len(wingmen) and not auto_run: printr.print( f"Press 'Run' to start your wingm{'e' if len(wingmen) > 1 else 'a'}n!" ) self.button = ctk.CTkButton( self, text="Run", command=self.toggle_listener, height=45, font=("TkHeadingFont", 22, "bold"), ) self.button.grid(row=4, column=0, padx=20, pady=10, sticky="ew") if not tower: printr.print_err( f"Could not load context.\nPlease check your context configuration for `{context_title}`." ) self.button.configure(state="disabled") elif len(wingmen) <= 0: printr.print_warn(f"No runnable Wingman found for `{context_title}`.") self.button.configure(state="disabled") elif auto_run: self.toggle_listener() def toggle_listener(self): if self.core.active: self.core.deactivate() self.status_var.set("Inactive") self.status_led.configure(image=self.status_icon_inactive) self.button.configure(text="Run") printr.print( "Your Wingman is now inactive.\nPress 'Run' to start listening again." ) else: self.core.activate() self.status_var.set("Active") self.status_led.configure(image=self.status_icon_active) self.button.configure(text="Stop") printr.print( "Your Wingman is now listening for commands.\nPress 'Stop' to stop listening." ) # Path: gui/views/context_view.py import customtkinter as ctk from gui.sections.context_switcher import ContextSwitcher from gui.sections.context_runner import ContextRunner class ContextView(ctk.CTkFrame): def __init__(self, master, **kwargs): super().__init__(master, **kwargs) self.core = master.core self.grid_columnconfigure(1, weight=1) self.grid_rowconfigure(0, weight=1)
self.context_switcher = ContextSwitcher(self, width=88, corner_radius=0)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: OliverMao/FlaskAutoApiBuilder # Path: Faab/Faab.py class Faab(Flask): _startup_message_printed = False models = [] db_config = object() need_register_bp = [] def __init__(self, **options): # 初始化函数,接收一个字符串类型的参数import_name super().__init__(**options) def add_models(self, model: list): # 添加模型函数,接收一个列表类型的参数model self.models = model def add_db_config(self, db_config: object): # 添加数据库配置函数,接收一个对象类型的参数db_config self.db_config = db_config def add_blueprints(self, blueprint: list): # 添加蓝图函数,接收一个列表类型的参数blueprint self.need_register_bp = blueprint def faab_ready(self): create_app(app=self, models=self.models, db_config=self.db_config, url_prefix="/api", blueprints=self.need_register_bp) CORS(self, resources=r'/*') self._print_startup_message() def run( self, host: str | None = None, port: int | None = None, debug: bool | None = None, load_dotenv: bool = True, **options: t.Any, ) -> None: super().run(host, port, debug, load_dotenv, **options) def _print_startup_message(self): if not getattr(self, '_startup_message_printed', False): print("\033[1;32m * Faab Version:", version) print(''' ███████╗ █████╗ █████╗ ██████╗ ██╗ ██╗ ██████╗ ██████╗ ██████╗ ██╗████████╗ ██████╗███╗ ██╗ ██╔════╝██╔══██╗██╔══██╗██╔══██╗ ╚██╗ ██╔╝██╔═══██╗██╔═══██╗██╔══██╗██║╚══██╔══╝██╔════╝████╗ ██║ █████╗ ███████║███████║██████╔╝█████╗╚████╔╝ ██║ ██║██║ ██║██████╔╝██║ ██║ ██║ ██╔██╗ ██║ ██╔══╝ ██╔══██║██╔══██║██╔══██╗╚════╝ ╚██╔╝ ██║ ██║██║ ██║██╔══██╗██║ ██║ ██║ ██║╚██╗██║ ██║ ██║ ██║██║ ██║██████╔╝ ██║ ╚██████╔╝╚██████╔╝██████╔╝██║ ██║██╗╚██████╗██║ ╚████║ ''') self._startup_message_printed = True # Path: Faab/FaabJWT.py def jwt_authentication(): """ 1.获取请求头Authorization中的token 2.判断是否以 Bearer开头 3.使用jwt模块进行校验 4.判断校验结果,成功就提取token中的载荷信息,赋值给g对象保存 """ auth = request.headers.get('Authorization') if auth and auth.startswith('Bearer '): "提取token 0-6 被Bearer和空格占用 取下标7以后的所有字符" token = auth[7:] "校验token" g.username = None try: "判断token的校验结果" payload = jwt.decode(token, SALT, algorithms=['HS256']) "获取载荷中的信息赋值给g对象" g.username = payload.get('username') except exceptions.ExpiredSignatureError: # 'token已失效' g.username = -1 except jwt.DecodeError: # 'token认证失败' g.username = -2 except jwt.InvalidTokenError: # '非法的token' g.username = -3 # print(g.username) # Path: demo/app.py from Faab import Faab from Faab.FaabJWT import jwt_authentication from blueprints.test import test_bp from blueprints.test.model import Users import factory as fac # Faab Project Demo class DBConfig(object): # 基础配置 user = 'faab' host = 'localhost' password = 'faab' SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://%s:%s@%s:3306/%s' % (user, password, host, 'faab') SQLALCHEMY_BINDS = { 'test': 'mysql+pymysql://%s:%s@%s:3306/%s' % (user, password, host, 'test') } SECRET_KEY = 'session_key' models = [ [ { "model": Users, "bp": test_bp, "url_prefix": "Users" } ] ] app = Faab(import_name=__name__, static_url_path='/s') app.add_models(models) app.add_db_config(DBConfig) fac.register(app) app.faab_ready() application = app # uWSGI启动必须有application @app.before_request def auth():
jwt_authentication()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: leeyuentuen/polestar_api # Path: custom_components/polestar_api/pypolestar/auth.py class PolestarAuth: """base class for Polestar authentication.""" def __init__(self, username: str, password: str) -> None: """Initialize the Polestar authentication.""" self.username = username self.password = password self.access_token = None self.refresh_token = None self.token_expiry = None self.latest_call_code = None self._client_session = httpx.AsyncClient() async def get_token(self, refresh=False) -> None: """Get the token from Polestar.""" headers = {"Content-Type": "application/json"} operationName = "getAuthToken" if not refresh: code = await self._get_code() if code is None: return params = { "query": "query getAuthToken($code: String!) { getAuthToken(code: $code) { id_token access_token refresh_token expires_in }}", "operationName": operationName, "variables": json.dumps({"code": code}), } else: if self.refresh_token is None: return token = self.refresh_token operationName = "refreshAuthToken" headers["Authorization"] = f"Bearer {self.access_token}" params = { "query": "query refreshAuthToken($token: String!) { refreshAuthToken(token: $token) { id_token access_token refresh_token expires_in }}", "operationName": operationName, "variables": json.dumps({"token": token}), } result = await self._client_session.get("https://pc-api.polestar.com/eu-north-1/auth/", params=params, headers=headers, timeout=HTTPX_TIMEOUT) self.latest_call_code = result.status_code resultData = result.json() if result.status_code != 200 or ("errors" in resultData and len(resultData["errors"])): _LOGGER.error(result) raise PolestarAuthException("Error getting token", result.status_code) _LOGGER.debug(resultData) if resultData['data']: self.access_token = resultData['data'][operationName]['access_token'] self.refresh_token = resultData['data'][operationName]['refresh_token'] self.token_expiry = datetime.now( ) + timedelta(seconds=resultData['data'][operationName]['expires_in']) # ID Token _LOGGER.debug(f"Response {self.access_token}") async def _get_code(self) -> None: query_params = await self._get_resume_path() # check if code is in query_params if query_params.get('code'): return query_params.get('code') # get the resumePath if query_params.get('resumePath'): resumePath = query_params.get('resumePath') if resumePath is None: return params = { 'client_id': 'polmystar' } data = { 'pf.username': self.username, 'pf.pass': self.password } result = await self._client_session.post( f"https://polestarid.eu.polestar.com/as/{resumePath}/resume/as/authorization.ping", params=params, data=data ) self.latest_call_code = result.status_code if result.status_code != 302: raise PolestarAuthException("Error getting code", result.status_code) # get the realUrl url = result.url code = result.next_request.url.params.get('code') # sign-in-callback result = await self._client_session.get(result.next_request.url, timeout=HTTPX_TIMEOUT) self.latest_call_code = result.status_code if result.status_code != 200: _LOGGER.error(result) raise PolestarAuthException("Error getting code callback", result.status_code) # url encode the code result = await self._client_session.get(url) self.latest_call_code = result.status_code return code async def _get_resume_path(self): """Get Resume Path from Polestar.""" params = { "response_type": "code", "client_id": "polmystar", "redirect_uri": "https://www.polestar.com/sign-in-callback" } result = await self._client_session.get("https://polestarid.eu.polestar.com/as/authorization.oauth2", params=params, timeout=HTTPX_TIMEOUT) if result.status_code in (303, 302): return result.next_request.url.params _LOGGER.error(result.text) raise PolestarAuthException("Error getting resume path ", result.status_code) # Path: custom_components/polestar_api/pypolestar/const.py BATTERY_DATA = "getBatteryData" # Path: custom_components/polestar_api/pypolestar/const.py CACHE_TIME = 30 # Path: custom_components/polestar_api/pypolestar/const.py CAR_INFO_DATA = "getConsumerCarsV2" # Path: custom_components/polestar_api/pypolestar/const.py ODO_METER_DATA = "getOdometerData" # Path: custom_components/polestar_api/pypolestar/exception.py class PolestarApiException(Exception): """Base class for exceptions in this module.""" # Path: custom_components/polestar_api/pypolestar/exception.py class PolestarAuthException(Exception): """Base class for exceptions in Auth module.""" error_code: int = None message: str = None def __init__(self, message, error_code) -> None: """Initialize the Polestar API.""" super().__init__(message) self.error_code = error_code # Path: custom_components/polestar_api/pypolestar/exception.py class PolestarNoDataException(Exception): """Exception for no data.""" # Path: custom_components/polestar_api/pypolestar/exception.py class PolestarNotAuthorizedException(Exception): """Exception for unauthorized call.""" # Path: custom_components/polestar_api/pypolestar/polestar.py from datetime import datetime, timedelta from .auth import PolestarAuth from .const import BATTERY_DATA, CACHE_TIME, CAR_INFO_DATA, ODO_METER_DATA from .exception import ( PolestarApiException, PolestarAuthException, PolestarNoDataException, PolestarNotAuthorizedException, ) import logging import httpx """Asynchronous Python client for the Polestar API.""""" _LOGGER = logging.getLogger(__name__) class PolestarApi: """Main class for handling connections with the Polestar API.""" def __init__(self, username: str, password: str) -> None: """Initialize the Polestar API."""
self.auth = PolestarAuth(username, password)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: dubverse-ai/MahaTTS # Path: maha_tts/config.py class config: semantic_model_centroids = 10000 + 1 seed_value = 3407 # Text to Semantic t2s_position = 4096 langs = ['english','tamil', 'telugu', 'punjabi', 'marathi', 'hindi', 'gujarati', 'bengali', 'assamese'] lang_index = {i:j for j,i in enumerate(langs)} # Semantic to acoustic sa_timesteps_max = 1000 #Acoustic Properties CLIP_LENGTH = 500 MAX_WAV_VALUE=32768.0 filter_length=1024 hop_length=256 #256 window = 'hann' win_length=1024 n_mel_channels=80 sampling_rate=22050 mel_fmin=0.0 mel_fmax=8000.0 # Path: maha_tts/text/symbols.py # Path: maha_tts/models/modules.py class GST(nn.Module): def __init__(self,model_channels=512,num_heads=8,in_channels=80,k=2): super(GST,self).__init__() self.model_channels=model_channels self.num_heads=num_heads self.reference_encoder=nn.Sequential( nn.Conv1d(in_channels,model_channels,3,padding=1,stride=2), nn.Conv1d(model_channels, model_channels*k,3,padding=1,stride=2), AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False), AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False), AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False), AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False), AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False) ) def forward(self,x): x=self.reference_encoder(x) return x # Path: maha_tts/models/autoregressive.py import os,sys import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import functools from typing import Any from torch.utils.data import Dataset,DataLoader from transformers import GPT2Tokenizer,GPT2Config, GPT2Model, GPT2LMHeadModel from tqdm import tqdm from maha_tts.config import config from maha_tts.text.symbols import labels,code_labels,text_labels,text_labels_en from maha_tts.models.modules import GST ''' Inspiration taken from https://github.com/neonbjb/tortoise-tts/blob/main/tortoise/models/autoregressive.py ''' def null_position_embeddings(range, dim): return torch.zeros((range.shape[0], range.shape[1], dim), device=range.device) class TS_model(nn.Module): def __init__(self,n_embed = 512, n_layer = 16, n_head = 8, n_positions = 2048, name='Smolie-in'): super(TS_model,self).__init__() self.vocab_size=len(labels) self.n_positions=n_positions self.n_embed=n_embed self.n_layer=n_layer self.n_head=n_head self.name=name self.config = GPT2Config(vocab_size=self.vocab_size,n_positions=self.n_positions,n_embd=self.n_embed,n_layer=self.n_layer,n_head=self.n_head) self.gpt = GPT2Model(self.config) del self.gpt.wpe self.gpt.wpe = functools.partial(null_position_embeddings, dim=self.n_embed) # Built-in token embeddings are unused. del self.gpt.wte self.GST = GST(model_channels=self.n_embed,num_heads=self.n_head,in_channels=config.n_mel_channels,k=1) if self.name == 'Smolie-en': self.text_head = nn.Linear(self.n_embed,len(text_labels_en)) else:
self.text_head = nn.Linear(self.n_embed,len(text_labels))
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: WCGKING/KINGUSERBOT # Path: Branded/modules/data.py async def is_gdel_user(user_id: int) -> bool: user = await gdeldb.find_one({"user_id": user_id}) if not user: return False return True # Path: Branded/modules/data.py async def get_gdel_user() -> list: results = [] async for user in gdeldb.find({"user_id": {"$gt": 0}}): user_id = user["user_id"] results.append(user_id) return results # Path: Branded/modules/data.py async def get_gdel_count() -> int: users = gdeldb.find({"user_id": {"$gt": 0}}) users = await users.to_list(length=100000) return len(users) # Path: Branded/modules/data.py async def add_gdel_user(user_id: int): is_gdel = await is_gdel_user(user_id) if is_gdel: return return await gdeldb.insert_one({"user_id": user_id}) # Path: Branded/modules/data.py async def del_gdel_user(user_id: int): is_gdel = await is_gdel_user(user_id) if not is_gdel: return return await gdeldb.delete_one({"user_id": user_id}) # Path: Branded/plugins/gdelete.py import asyncio from pyrogram import * from pyrogram.types import Message from .. import * from ..modules.data import (is_gdel_user, get_gdel_user, get_gdel_count, add_gdel_user, del_gdel_user) @app.on_message(commandx(["gdl", "gdel", "gdelete"]) & SUPUSER) async def add_gdelete_user(client, message: Message): if not message.reply_to_message: if len(message.command) != 2: return await message.reply_text("Reply to a user's message or give username/user_id.") user = message.text.split(None, 1)[1] user = await app.get_users(user) user_id = user.id mention = user.mention else: user_id = message.reply_to_message.from_user.id mention = message.reply_to_message.from_user.mention if user_id == message.from_user.id: return await message.reply_text("You want to add Global Delete yourself? How Fool!") elif user_id == SUPUSER: return await message.reply_text("Should i activate Global Delete on myself? Lol") elif user_id in SUDOERS: return await message.reply_text("You want add Global Delete on sudo user?") is_gdel = await is_gdel_user(user_id) if is_gdel: return await message.reply_text("{0} is already affected by **Global Delete**".format(mention)) if user_id not in GDELSUB: GDELSUB.add(user_id)
await add_gdel_user(user_id)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: kudelskisecurity/fuzzomatic # Path: fuzzomatic/approaches/functions.py def score_functions(functions): interesting_function_names = ["parse", "load", "read", "str", "eval"] # order functions by most interesting first ordered_functions = [] for f in functions: function_name = f[1] args = f[2] priority = 0 is_name_interesting = False for pattern in interesting_function_names: if pattern in function_name: is_name_interesting = True if len(args) == 1: arg_type = args[0] if arg_type == "&str": priority = 100 elif arg_type == "&[u8]": priority = 100 elif arg_type == "String": priority = 100 elif arg_type == "bool": priority = 0 elif arg_type == "unknown": priority = 10 elif type(arg_type) == tuple and arg_type[0] == "&array": priority = 100 elif is_name_interesting: priority = 100 if args[0] == "self": priority = -15 elif args[0] == "self": # functions with "self" as first argument priority = -50 else: priority = 50 elif len(args) > 1: known_types = 0 for arg in args: if arg != "unknown": known_types += 1 if known_types == len(args): priority = 30 if "&str" in args or "&[u8]" in args or "String" in args: priority = 75 if any(type(arg) == tuple and arg[0] == "&array" for arg in args): priority = 75 else: # functions with multiple arguments where not all types are known priority = -10 if args[0] == "self": # functions with "self" as first argument priority = -50 else: # skip functions with no arguments priority = -100 # give low priority to functions that are likely to load something by filename if "file" in function_name and arg_type == "&str": priority = 0 augmented_function = [*f, priority] ordered_functions.append(augmented_function) ordered_functions = sorted(ordered_functions, key=lambda x: x[3], reverse=True) return ordered_functions # Path: fuzzomatic/tools/cargo_doc.py def parse_cargo_doc_json(path): with open(path) as f: jso = json.loads(f.read()) # get functions that take only one parameter and that are public root = jso["root"] index = jso["index"] root_elem = index[root] root_inner_items = root_elem["inner"]["module"]["items"] functions = [] for elem in root_inner_items: path = [] e = index[elem] funcs = parse_item(index, e, path) functions.extend(funcs) return functions # Path: fuzzomatic/docparse.py import argparse from fuzzomatic.approaches.functions import score_functions from fuzzomatic.tools.cargo_doc import parse_cargo_doc_json #!/usr/bin/env python3 def get_parser(): prog_name = "docparse" parser = argparse.ArgumentParser( prog=prog_name, description="Parse cargo doc json and print public functions", ) parser.add_argument( "json_path", help="Path to cargo doc json file", ) return parser def main(): parser = get_parser() args = parser.parse_args() functions = parse_cargo_doc_json(args.json_path)
ordered_functions = score_functions(functions)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: muyuworks/myla # Path: myla/vectorstores/_base.py class Record(Dict): @staticmethod def values_to_text(record: Dict, props: List[str] = None, separator: str = '\001'): if props and not isinstance(props, list): raise ValueError("props should be a list") if props: o = itemgetter(*props) if len(props) == 1: v = [o(record)] else: v = list(o(record)) else: v = list(record.values()) vl = [] for i in v: if not isinstance(i, str): vl.append(json.dumps(i, ensure_ascii=False)) else: vl.append(i) v = vl return separator.join(v) # Path: myla/vectorstores/_base.py class VectorStore(ABC): def __init__(self) -> None: pass @abstractmethod def create_collection(self, collection: str, schema: Dict[str, type] = None, mode="create"): """Create a new collection""" @abstractmethod def add( self, collection: str, records: List[Record], embeddings_columns: Optional[List[str]] = None, vectors: Optional[List[List[float]]] = None, **kwargs ): """Add record to the vectorsotre""" @abstractmethod def delete(self, collection: str, query: str): """Delete record from the vectorstore""" @abstractmethod def search( self, collection: str = None, query: str = None, vector: List = None, filter: Any = None, limit: int = 20, columns: Optional[List[str]] = None, with_vector: bool = False, with_distance: bool = False, **kwargs ) -> Optional[List[Record]]: """Search records""" async def asearch( self, collection: str = None, query: str = None, vector: List = None, filter: Any = None, limit: int = 20, columns: Optional[List[str]] = None, with_vector: bool = False, with_distance: bool = False, **kwargs ): return await asyncio.get_running_loop().run_in_executor( None, partial(self.search, **kwargs), collection, query, vector, filter, limit, columns, with_vector, with_distance ) # Path: myla/vectorstores/_embeddings.py class Embeddings(ABC): @abstractmethod def embed_batch(self, texts: List[str], **kwargs) -> List[List[float]]: """Embed text batch.""" def embed(self, text: str, **kwargs) -> List[float]: """Embed text.""" return self.embed_batch(texts=[text], **kwargs)[0] async def aembed(self, text: str, **kwargs) -> List[float]: """Asynchronous Embed text.""" return await asyncio.get_running_loop().run_in_executor( None, self.embed, text, **kwargs ) async def aembed_batch(self, texts: [str], **kwargs) -> List[List[float]]: """Asynchronous Embed text.""" return await asyncio.get_running_loop().run_in_executor( None, self.embed_batch, texts, **kwargs ) # Path: myla/vectorstores/lancedb_vectorstore.py from typing import Any, List, Optional, Dict from ._base import Record, VectorStore from ._embeddings import Embeddings import pyarrow as pa import lancedb as lancedb import pyarrow as pa VECTOR_COLUMN_NAME = "_vector" class LanceDB(VectorStore): def __init__(self, db_uri, embeddings: Embeddings = None) -> None: super().__init__() try: pa.__version__ except ImportError as exc: raise ImportError( "Could not import pyarrow python package. " "Please install it with `pip install pyarrow`." ) from exc try: # disable diagnostics lancedb.utils.CONFIG['diagnostics'] = False except ImportError as exc: raise ImportError( "Could not import lancedb python package. " "Please install it with `pip install lancedb`." ) from exc self._db_uri = db_uri self._embeddings = embeddings self._db = lancedb.connect(self._db_uri) self._tables = {} def create_collection(self, collection: str, schema: Dict[str, type] = None, mode="create"): if schema is None: raise ValueError("Invalid schema to create LanceDB table.") s = self._convert_schema(schema=schema) self._db.create_table(collection, schema=s, mode=mode) def add( self, collection: str,
records: List[Record],
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: OSU-NLP-Group/TableLlama # Path: llama_attn_replace.py def replace_llama_attn(use_flash_attn=True, use_full=False): if use_flash_attn: cuda_major, cuda_minor = torch.cuda.get_device_capability() if cuda_major < 8: warnings.warn( "Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward." "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593" ) transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = ( _prepare_decoder_attention_mask ) transformers.models.llama.modeling_llama.LlamaAttention.forward = forward_flashattn_full if use_full else forward_flashattn else: transformers.models.llama.modeling_llama.LlamaAttention.forward = forward_noflashattn # Path: supervised_fine_tune.py PROMPT_DICT = { "prompt_input": ( "Below is an instruction that describes a task, paired with an input that provides further context. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Input:\n{input_seg}\n\n### Question:\n{question}\n\n### Response:" ), "prompt_no_input": ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response:" ), } # Path: inference_row_pop.py import os import json import sys import math import torch import argparse import transformers from peft import PeftModel from transformers import GenerationConfig from llama_attn_replace import replace_llama_attn from supervised_fine_tune import PROMPT_DICT from tqdm import tqdm # import textwrap # from queue import Queue # from threading import Thread # import gradio as gr def parse_config(): parser = argparse.ArgumentParser(description='arg parser') parser.add_argument('--base_model', type=str, default="/data1/pretrained-models/llama-7b-hf") parser.add_argument('--cache_dir', type=str, default="./cache") parser.add_argument('--context_size', type=int, default=-1, help='context size during fine-tuning') parser.add_argument('--flash_attn', type=bool, default=False, help='') parser.add_argument('--temperature', type=float, default=0.6, help='') parser.add_argument('--top_p', type=float, default=0.9, help='') parser.add_argument('--max_gen_len', type=int, default=512, help='') parser.add_argument('--input_data_file', type=str, default='input_data/', help='') parser.add_argument('--output_data_file', type=str, default='output_data/', help='') args = parser.parse_args() return args def generate_prompt(instruction, question, input_seg=None): if input:
return PROMPT_DICT["prompt_input"].format(instruction=instruction, input_seg=input_seg, question=question)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: pytorch-labs/torchfix # Path: torchfix/torchfix.py class TorchChecker: name = "TorchFix" version = __version__ # The parameters need to have these exact names. # See https://flake8.pycqa.org/en/latest/plugin-development/plugin-parameters.html # `tree` is unused, but the plugin doesn't work without it. def __init__(self, tree, lines): # Filter out files that don't have "torch" string in them. # This avoids expensive parsing. MARKER = "torch" # this will catch import torch or functorch has_marker = False self.module = None for line in lines: if MARKER in line: has_marker = True break if has_marker: module = cst.parse_module("".join(lines)) self.module = cst.MetadataWrapper(module, unsafe_skip_copy=True) self.violations = [] self.visitors = GET_ALL_VISITORS() def run(self): if self.module: self.module.visit_batched(self.visitors) for v in self.visitors: self.violations += v.violations for violation in self.violations: yield violation.flake8_result() @staticmethod def add_options(optmanager): optmanager.extend_default_ignore(DISABLED_BY_DEFAULT) # Path: torchfix/torchfix.py class TorchCodemod(codemod.Codemod): def __init__( self, context: codemod.CodemodContext, config: Optional[TorchCodemodConfig] = None, ) -> None: super().__init__(context) self.config = config def transform_module_impl(self, module: cst.Module) -> cst.Module: # We use `unsafe_skip_copy`` here not only to save some time, but # because `deep_replace`` is identity-based and will not work on # the original module if the wrapper does a deep copy: # in that case we would need to use `wrapped_module.module` # instead of `module`. wrapped_module = cst.MetadataWrapper(module, unsafe_skip_copy=True) visitors = GET_ALL_VISITORS() violations = [] needed_imports = [] wrapped_module.visit_batched(visitors) for v in visitors: violations += v.violations needed_imports += v.needed_imports fixes_count = 0 replacement_map = {} assert self.context.filename is not None for violation in violations: skip_violation = False if self.config is None or self.config.select != "ALL": for disabled_code in DISABLED_BY_DEFAULT: if violation.error_code.startswith(disabled_code): skip_violation = True break if skip_violation: continue if violation.replacement is not None: replacement_map[id(violation.node)] = violation.replacement fixes_count += 1 try: path = Path(self.context.filename).relative_to(Path.cwd()) except ValueError: # Not a subpath of a current dir, use absolute path path = Path(self.context.filename) print(f"{path}{violation.codemod_result()}") new_module = deep_multi_replace(module, replacement_map) add_imports_visitor = codemod.visitors.AddImportsVisitor( self.context, needed_imports ) new_module = new_module.visit(add_imports_visitor) update_functorch_imports_visitor = _UpdateFunctorchImports() new_module = new_module.visit(update_functorch_imports_visitor) if fixes_count == 0 and not update_functorch_imports_visitor.changed: raise codemod.SkipFile("No changes") return new_module # Path: torchfix/torchfix.py class TorchCodemodConfig: select: Optional[str] = None # Path: torchfix/torchfix.py def GET_ALL_VISITORS(): return [ TorchDeprecatedSymbolsVisitor(DEPRECATED_CONFIG_PATH), TorchRequireGradVisitor(), TorchSynchronizedDataLoaderVisitor(), TorchVisionDeprecatedPretrainedVisitor(), TorchVisionDeprecatedToTensorVisitor(), TorchUnsafeLoadVisitor(), TorchReentrantCheckpointVisitor(), ] # Path: tests/test_torchfix.py from pathlib import Path from torchfix.torchfix import ( TorchChecker, TorchCodemod, TorchCodemodConfig, GET_ALL_VISITORS, ) import logging import libcst.codemod as codemod FIXTURES_PATH = Path(__file__).absolute().parent / "fixtures" LOGGER = logging.getLogger(__name__) def _checker_results(s): checker = TorchChecker(None, s) return [f"{line}:{col} {msg}" for line, col, msg, _ in checker.run()] def _codemod_results(source_path): with open(source_path) as source: code = source.read()
config = TorchCodemodConfig(select="ALL")
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: FISHers6/CodeLearn-Agent # Path: codelearn/project/project.py class Project: def __init__(self, id: str, local_dir: str, source_content: FileTree, repo_url: str = None, last_updated_time = None): """ :param name: 项目名称 :param contents: 一个字典,其中键是文件路径,值是文件内容 """ self.id = id self.local_dir = local_dir self.repo_url = repo_url self.contents = source_content self.last_updated_time = last_updated_time # Path: codelearn/utils/file_util.py def process_file_paths(file_paths: str) -> List[str]: # 用于存储处理后的路径 processed_paths = [] # 使用正则表达式来分割字符串,处理多种可能的分隔符 paths = re.split(r'[ ,;]+', file_paths) for path in paths: # 删除路径两侧可能存在的多余空格 path = path.strip() if not path: continue # 跳过空字符串 # 将路径分割为组件 path_components = re.split(r'[\\/]', path) # 使用 os.path.join 和 os.sep 来连接路径组件 normalized_path = os.path.join(*path_components) # 将处理后的路径添加到结果列表中 processed_paths.append(normalized_path) # 移除重复的路径 processed_paths = list(set(processed_paths)) return processed_paths # Path: codelearn/tools/file_content_view.py import json from typing import List, Optional from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools import BaseTool from codelearn.project.project import Project from codelearn.utils.file_util import process_file_paths class FileContentViewTool(BaseTool): """Tool to fetch and display detailed content of project files.""" name: str = "get_file_content" description: str = ( "The 'get_file_content' tool fetches and displays detailed content of specified files within the project, including both source code and documentation. It's an important tool for users who need detailed from code source." "Input a comma-separated list of file names (without folder or path names) to view. Incomplete paths are not accepted. For example swim-main/src/example.txt is a full path file, but 'src/example' is incomplete directory folder not allowed" "Output is a dictionary with 'files' key containing a list of dictionaries for each file, " "**Ensure you've requested the repository structure before asking for file contents.The requested file must exist in the project**" "Useful for users diving deep into a project's codebase or documentation to understand its intricacies." )
project: Project
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: kaixinol/twitter_user_tweet_crawler # Path: twitter_user_tweet_crawler/browser.py def get_browser(headless: bool = False) -> WebDriver: chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--blink-settings=imagesEnabled=false') chrome_options.add_argument('--disable-remote-fonts') chrome_options.add_argument('--disable-gpu') chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--window-size=1200x600"') if headless: chrome_options.add_argument('--headless') driver = webdriver.Chrome(options=chrome_options) driver.__dict__['is_using'] = False return driver # Path: twitter_user_tweet_crawler/browser.py def get_multiple_browsers(count: int, headless: bool = False) -> list[WebDriver]: return [get_browser(headless) for _ in range(count)] # Path: twitter_user_tweet_crawler/pool.py class ThreadPool: browser: list[WebDriver] jobs: list[Callable] = [] pool: ThreadPoolExecutor def __init__(self, browser: list[WebDriver], pool: ThreadPoolExecutor): self.browser = browser self.pool = pool def check_and_work(self): if not self.jobs: return for i in self.browser: if not i.__dict__['is_using']: i: WebDriver i.__dict__['is_using'] = True job = self.jobs.pop(0) callback: Future = self.pool.submit(job, i) callback.add_done_callback(lambda future: self._on_job_complete(i, callback)) return def _on_job_complete(self, index, future): elements = self.browser.index(index) try: future.result() # By default, `concurrent.futures` will silently log errors but will not raise them # Throw the error directly finally: if slow_mode: sleep(30) self.browser[elements].__dict__['is_using'] = False self.check_and_work() # Path: twitter_user_tweet_crawler/util/config.py class Config: def load(self, setting: dict | str | Path): def save(self): def __getitem__(self, item): def set_work_directory(path: Path): # Path: twitter_user_tweet_crawler/__main__.py import concurrent.futures import json from pathlib import Path from time import sleep from urllib.parse import urlparse from loguru import logger from rich.prompt import Confirm from selenium.webdriver.chrome.webdriver import WebDriver from selenium.webdriver.common.by import By from .browser import get_browser, get_multiple_browsers from .pool import ThreadPool from .util.config import config, work_directory, set_work_directory from .tweet import Tweet def main(): cookie: list[dict] work_list: list[WebDriver] driver: WebDriver def read_config() -> list[dict]: with open(work_directory / 'cookie.json', 'r') as f: return json.load(f) def write_config(data: list[dict]): with open(work_directory / 'cookie.json', 'w') as f: json.dump(data, f) def set_cookie(browser: WebDriver): for i in cookie: browser.add_cookie(i) def get_executor(count: int | None = None): return concurrent.futures.ThreadPoolExecutor(max_workers=count) def get_items_need_handle(): return driver.find_elements(*selector) selector = (By.XPATH, '//*/div[2]/div/div[3]/a[@role="link"]')
(Path(config.save) / 'res').mkdir(exist_ok=True, parents=True)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: kirill-vish/Beyond-INet # Path: utils/misc.py def load_model_transform(model_name, pretrained_dir, img_size=224): print(f"Loading {model_name}") checkpoint_path = None transform_val = None if model_name == "deit3_21k": model = models_deit.deit_base_patch16_LS(img_size=img_size) checkpoint_path = os.path.join(pretrained_dir, "deit_3_base_224_21k.pth") elif model_name == "convnext_base_21k": model = models_convnextv1.convnext_base() checkpoint_path = os.path.join(pretrained_dir, "convnext_base_22k_1k_224.pth") elif model_name == "vit_clip": model, _, transform_val = open_clip.create_model_and_transforms( 'ViT-B-16', pretrained='laion400m_e31', force_image_size=img_size) model = models_clip.CLIPModel(model=model, model_name='ViT-B-16') checkpoint_path = None elif model_name == "convnext_clip": model, _, transform_val = open_clip.create_model_and_transforms( 'convnext_base', pretrained='laion400m_s13b_b51k', force_image_size=img_size) model = models_clip.CLIPModel(model=model, model_name='convnext_base') checkpoint_path = None if checkpoint_path is not None: checkpoint = torch.load(checkpoint_path) state_dict = checkpoint['model'] if img_size != 224 and model_name == 'deit3_21k': state_dict = interpolate_pos_embed(model, state_dict) msg = model.load_state_dict(state_dict, strict=False) print(msg) assert set(checkpoint['model'].keys()) == set( model.state_dict().keys()) assert len(msg.missing_keys) == 0 and len( msg.unexpected_keys ) == 0, "Some keys in the state dict do not match" return model, transform_val # Path: inference/modelvshuman/evaluation/evaluate.py IMAGENET_LABEL_FILE = pjoin(c.CODE_DIR, "evaluation", "imagenet_labels.txt") def print_performance_to_csv(model_name, dataset_name, performance, metric_name, data_parent_dir=c.PERFORMANCES_DIR): def print_predictions_to_console(softmax_output, top_n=5, labels_path=IMAGENET_LABEL_FILE): def __init__(self, model_name, dataset, data_parent_dir=c.RAW_DATA_DIR): def create_session_csv(self, session): def print_batch_to_csv(self, object_response, batch_targets, paths): class ResultPrinter(): # Path: inference/modelvshuman/utils.py def load_dataset(name, *args, **kwargs): default_kwargs = {"batch_size": 16, "num_workers": 4} kwargs = {**default_kwargs, **kwargs} logger.info(f"Loading dataset {name}") supported_datasets = dataset_module.list_datasets() module_name = supported_datasets.get(name, None) if module_name is None: raise NameError( f"Dataset {name} is not supported, " f"please select from {list(supported_datasets.keys())}") elif os.path.exists(join(c.DATASET_DIR, name)): return eval(f"dataset_module.{module_name}")(*args, **kwargs) elif try_download_dataset_from_github(name): return eval(f"dataset_module.{module_name}")(*args, **kwargs) else: raise NotImplementedError( f"Dataset {name} not available for download, please obtain the dataset " f"yourself and save it to {c.DATASET_DIR}") # Path: inference/modelvshuman/utils.py def load_model(model_name, *args): if model_name in zoomodels.__dict__: model = eval("pytorch_model_zoo.model_pytorch")(model_name, *args) framework = 'pytorch' else: model = eval(f"pytorch_model_zoo.model_timm")(model_name, *args) framework = 'pytorch' return model, framework # Path: inference/modelvshuman/model_evaluator.py import copy import datetime import logging import os import matplotlib as mpl import torch from torch.nn.functional import softmax from tqdm import tqdm from utils.misc import load_model_transform from .evaluation import evaluate as e from .utils import load_dataset, load_model logger = logging.getLogger(__name__) MAX_NUM_MODELS_IN_CACHE = 3 mpl.rcParams['font.size'] = 22 def device(): return torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class ModelEvaluator: def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwargs): """ Evaluate Model on the given dataset and return the accuracy. Args: model_name: model: dataset: *args: **kwargs: """ logging_info = f"Evaluating model {model_name} on dataset {dataset.name} using Pytorch Evaluator" logger.info(logging_info) print(logging_info) for metric in dataset.metrics: metric.reset() with torch.no_grad(): result_writer = e.ResultPrinter(model_name=model_name, dataset=dataset) for images, target, paths in tqdm(dataset.loader): images = images.to(device()) if "forward_batch" in dir(model): logits = model.forward_batch(images) softmax_output = model.softmax(logits) else: logits = model(images) softmax_output = softmax(logits, dim=1).detach().cpu().numpy() if isinstance(target, torch.Tensor): batch_targets = model.to_numpy(target) else: batch_targets = target predictions = dataset.decision_mapping(softmax_output) for metric in dataset.metrics: metric.update(predictions, batch_targets, paths) if kwargs["print_predictions"]: result_writer.print_batch_to_csv( object_response=predictions, batch_targets=batch_targets, paths=paths) def _get_datasets(self, dataset_names, *args, **kwargs): dataset_list = [] for dataset in dataset_names:
dataset = load_dataset(dataset, *args, **kwargs)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: shengliu66/ICV # Path: utils/forward_tracer.py class ForwardTracer: def __init__(self, model: PreTrainedModel, forward_trace: ForwardTrace, with_submodules: bool = False): self._model = model self._forward_trace = forward_trace self._with_submodules = with_submodules self._layers = get_layers(model) self._attn_layers = get_attention_layers(model) self._mlp_layers = get_mlp_layers(model) self._hooks = [] def __enter__(self): self._register_forward_hooks() def __exit__(self, exc_type, exc_value, traceback): for hook in self._hooks: hook.remove() if exc_type is None: residual_stream = self._forward_trace.residual_stream if residual_stream.hidden[0] == []: residual_stream.hidden.pop(0) for key in residual_stream.__dataclass_fields__.keys(): acts = getattr(residual_stream, key) # TODO: this is a hack, fix it if key != "hidden" and not self._with_submodules: continue nonempty_layer_acts = [layer_acts for layer_acts in acts if layer_acts != []][0] final_shape = torch.cat(nonempty_layer_acts, dim=0).shape for i, layer_acts in enumerate(acts): if layer_acts == []: acts[i] = torch.zeros(final_shape) else: acts[i] = torch.cat(layer_acts, dim=0) acts = torch.stack(acts).transpose(0, 1) setattr(residual_stream, key, acts) # if self._with_submodules: # self._forward_trace.attentions = torch.stack(self._forward_trace.attentions).transpose(0, 1) # else: self._forward_trace.attentions = None def _register_forward_hooks(self): model = self._model hooks = self._hooks residual_stream = self._forward_trace.residual_stream def store_activations(residual_stream: ResidualStream, acts_type: str, layer_num: int): def hook(model, inp, out): if isinstance(out, tuple): out = out[0] out = out.float().to("cpu", non_blocking=True) acts = getattr(residual_stream, acts_type) while len(acts) < layer_num + 1: acts.append([]) try: acts[layer_num].append(out) except IndexError: print(len(acts), layer_num) return hook def store_attentions(layer_num): def hook(model, inp, out): attention_maps = out[1] attention_maps = attention_maps.to("cpu", non_blocking=True).float() self._forward_trace.attentions[layer_num] = attention_maps return hook embedding_hook = get_embedding_layer(self._model).register_forward_hook( store_activations(residual_stream, "hidden", 0) ) hooks.append(embedding_hook) for i, layer in enumerate(self._layers): hidden_states_hook = layer.register_forward_hook(store_activations(residual_stream, "hidden", i + 1)) hooks.append(hidden_states_hook) if self._with_submodules: for i, mlp_layer in enumerate(self._mlp_layers): mlp_res_hook = mlp_layer.register_forward_hook(store_activations(residual_stream, "mlp", i)) hooks.append(mlp_res_hook) for i, attn_layer in enumerate(self._attn_layers): attn_res_hook = attn_layer.register_forward_hook(store_activations(residual_stream, "attn", i)) hooks.append(attn_res_hook) # attn_attentions_hook = attn_layer.register_forward_hook(store_attentions(i)) # hooks.append(attn_attentions_hook) # Path: utils/forward_tracer.py class ForwardTrace: def __init__(self): self.residual_stream: Optional[ResidualStream] = ResidualStream( hidden=[], attn=[], mlp=[], ) self.attentions: Optional[torch.Tensor] = None # Path: utils/context_manager.py import os from contextlib import AbstractContextManager, ExitStack from typing import Iterable from utils.forward_tracer import ForwardTracer, ForwardTrace class CombinedContextManager(AbstractContextManager): def __init__(self, context_managers): self.context_managers = context_managers self.stack = None def __enter__(self): self.stack = ExitStack() for cm in self.context_managers: self.stack.enter_context(cm) return self.stack def __exit__(self, exc_type, exc_val, exc_tb): if self.stack is not None: self.stack.__exit__(exc_type, exc_val, exc_tb) def modified_forward_context_manager(model, forward_modifiers=()): context_manager = CombinedContextManager([*forward_modifiers]) return context_manager def traced_forward_context_manager(model, with_submodules=False):
forward_trace = ForwardTrace()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Mohamad-Hussein/speech-assistant # Path: src/funcs.py def find_gpu_config(logger): """ Finds the GPU config and returns the device, device name and torch_dtype based on GPU platform and availability. Args: logger (logging.Logger): Logger instance to log messages onto model.log (for Windows) Returns: device (str): Device type, either cuda:0, cpu, or ... device_name (str): Device name torch_dtype (torch.dtype): Data type for torch, float16 for GPU, float32 for CPU """ import torch from torch import cuda from torch import float16, float32 logger.debug("Checking for GPU config") # Assume, then check device = torch.device("cuda:0" if cuda.is_available() else "cpu") torch_dtype = float16 if cuda.is_available() else float32 device_name = "" # CUDA if cuda.is_available(): # Debugging made easier device_name = cuda.get_device_name() logger.debug("GPU detected from cuda") logger.info(f"Device: {device}") logger.info(f"Device name: {cuda.get_device_name()}") logger.info(f"Device properties: {cuda.get_device_properties(device)}") logger.info(f"Device count: {cuda.device_count()}") logger.info(f"Device capability: {cuda.get_device_capability()}") logger.info(f"Current memory allocated: {cuda.mem_get_info()}") # AMD else: try: import torch_directml as dml if dml.is_available(): torch_dtype = float16 device = dml.device() device_name = dml.device_name(dml.default_device()) logger.debug("GPU detected from torch_directml") logger.info(f"Available: {dml.is_available()}") logger.info(f"Devices Available: {dml.device_count()}") logger.info(f"Device: {device}") logger.info(f"Default device: {dml.default_device()}") logger.info(f"Device name: {dml.device_name(0)}") logger.info(f"GPU memory: {dml.gpu_memory()}") else: torch_dtype = float32 logger.debug("No GPU detected, using cpu") logger.warning( "Attention, using the CPU is not recommended! Computation time will be long." ) # Use CPU if directml is not installed except Exception: logger.debug(f"Package directml not found") torch_dtype = float32 logger.debug("No GPU detected, using cpu") logger.warning( "Attention, using the CPU is not recommended! Computation time will be long." ) logger.info( f"GPU config -- device: {device}, device name: {device_name}, torch_dtype: {torch_dtype}" ) return device, device_name, torch_dtype # Path: src/funcs.py def process_text(text: str): """ Processes the text to not type dictation in which the user has not said anything Args: text (str): The text to be processed Returns: text (str): The processed text """ processed = text if text.strip().lower() in "you're not.": processed = "" return processed # Path: src/funcs.py def type_writing(text): """ Types the text onto the screen. Downside is that it is slow and activates other hotkeys if you hold windows due to it being real keystrokes. Args: text (str): The text to be typed Returns: None """ typewrite(text) # Path: src/funcs.py def copy_writing(text): """ Copies the text to the clipboard and writes it. Args: text (str): The text to be copied and written Returns: None """ copy(text) hotkey("ctrl", "v") # Path: src/model_inference.py from sys import exit from os.path import join from time import sleep, time from src.funcs import find_gpu_config, process_text from src.funcs import type_writing, copy_writing from transformers.pipelines import pipeline from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor from optimum.bettertransformer import BetterTransformer import logging # from optimum.onnxruntime import ORTModelForSpeechSeq2Seq # from optimum.nvidia.pipelines import pipeline # MODEL_ID = "openai/whisper-tiny.en" # ~400 MiB of GPU memory MODEL_ID = "distil-whisper/distil-small.en" # ~500-700 MiB of GPU memory # MODEL_ID = "distil-whisper/distil-medium.en" # ~900-1500 MiB of GPU memory # MODEL_ID = "distil-whisper/distil-large-v2" # ~1700-2000 MiB of GPU memory # MODEL_ID = "openai/whisper-large-v3" # ~4000 MiB of GPU memory # MODEL_ID = "optimum/whisper-tiny.en" # ~400 MiB of GPU memory # Choosing which way to write text. WRITE = type_writing def service(queue, event): # Configure the logging settings logging.basicConfig( level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s", filename=join("logs", "model.log"), filemode="w", ) logger = logging.getLogger(__name__) # Checking for GPU
device, device_name, torch_dtype = find_gpu_config(logger)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Fraunhofer-SCAI/corr_shap # Path: corr_shap/sampling/SamplingStrategy.py class SamplingStrategy: def __init__(self, explainer, **kwargs): """ Construct all necessary attributes for the SamplingStrategy object.""" self.data = explainer.data.data self.data_weights = explainer.data.weights self.data_weight_sum = np.sum(self.data_weights) self.N = explainer.N # num samples in self.data def sample(self, m): """ Return prepared sample data. These data have fixed features for those contained in subset (m=1) and normalized weights. :param m: given mask of subset :return: samples with fixed masked features and normalized weights """ x = self.x samples = self.data.copy() samples = self.set_masked_features_to_instance(m, x, samples) weights = self.normalize(self.data_weights) return samples, weights def normalize(self, weights): """ Normalize weights by their sum""" if self.data_weight_sum != 0: weights = weights/self.data_weight_sum return weights def set_masked_features_to_instance(self, m, x, samples): """ Set masked features for subset to given instance. :param m: given mask of subset :param x: given instance to be explained :param samples: background data that are the basis for the sample :return: samples with fixed masked features """ if isinstance(self.varyingFeatureGroups, (list,)): for j in range(self.varyingFeatureGroups.shape[0]): for k in self.varyingFeatureGroups[j]: if m[j] == 1.0: samples[:, k] = x[0, k] else: # for non-jagged numpy array we can significantly boost performance mask = m == 1.0 groups = self.varyingFeatureGroups[mask] if len(groups.shape) == 2: for group in groups: samples[:, group] = x[0, group] else: # further performance optimization in case each group has a single feature evaluation_data = x[0, groups] samples[:, groups] = evaluation_data return samples def set_instance(self, instance): """ Set instance to x. """ self.x = instance.x.copy() def set_varying_feature_groups(self, varying_groups): """ Set indicies of varying feature groups.""" self.varyingFeatureGroups = varying_groups # Path: corr_shap/sampling/sampling_factory.py def get_sampling_strategy(type, explainer, kwargs): """Assign the sampling strategy method to the explainer based on the given type. """ sampling_strategies = {"default": SamplingStrategy, "gauss": GaussStrategy, "copula": CopulaStrategy, "empirical": EmpiricalStrategy, "gauss+empirical": GaussEmpiricalStrategy, "copula+empirical": CopulaEmpiricalStrategy} return sampling_strategies[type](explainer=explainer, **kwargs) # Path: corr_shap/CorrExplainer.py from scipy.special import binom from scipy import sparse from shap.utils._legacy import convert_to_instance, match_instance_to_data, IdentityLink from shap.explainers._explainer import Explainer from shap.explainers._kernel import KernelExplainer from shap.explainers._kernel import Kernel as KernelExplainer from corr_shap.sampling.SamplingStrategy import SamplingStrategy from corr_shap.sampling.sampling_factory import get_sampling_strategy import numpy as np import pandas as pd import logging import copy import itertools import typing import warnings try: except ImportError: log = logging.getLogger('corr_shap') class CorrExplainer(KernelExplainer): """Uses the modified Kernel SHAP method to explain the output of any function. The modifications (based on the paper 'Explaining individual predictions when features are dependent: More accurate approximations to Shapley values' by Kjersti Aas, Martin Jullum and Anders Løland) offer the possibility to include dependencies between features. There are 3 different approaches, which are described in the following sampling strategies. """
def __init__(self, model, data, link=IdentityLink(), sampling: typing.Union[str, SamplingStrategy]="default", sampling_kwargs={}, **kwargs):
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: codereport/jello # Path: grid.py class Grid: def __init__(self, n): self.n = n * 2 self.grid = [[" "] * self.n, [" "] * self.n] def add_level(self): self.grid.append([" "] * self.n) self.grid.append([" "] * self.n) def add_subtree(self, level, start, end, s): if s in ["W", "m", "d"]: self.grid[level * 2 ][start] = VERT self.grid[level * 2 + 1][start] = s return if (level + 1) * 2 > len(self.grid): self.add_level() mid = (start + end) // 2 self.grid[level * 2][start ] = START self.grid[level * 2][end ] = END self.grid[level * 2][start + 1:end ] = list(HORIZ * (end - start -1 )) self.grid[level * 2][(start + end) // 2] = MID self.grid[level * 2 + 1][mid - len(s) // 2:mid - len(s) // 2 + len(s)] = list(s) def fill_in_vertical_bars(self): for column in range(0, self.n): found_start_end = False for row in reversed(range(len(self.grid))): c = self.grid[row][column] if c in [START, END]: found_start_end = True elif found_start_end: if c == " ": self.grid[row][column] = "⋮" # │ alternative else: found_start_end = False # combinator chain sequence def ccs(self): first_two = "".join("".join(row).strip()[0:2] for row in self.grid) no_bars = "".join(c for c in first_two if c not in "─└ ⋮┬│") while "h₁" in no_bars: no_bars = no_bars.replace("h₁", "") return no_bars def display(self, indent = 0): for row in self.grid: print(" " * indent + "".join(row)) # Path: utils.py class Chain(Enum): MONADIC = 1 DYADIC = 2 # Path: utils.py class Quick(Enum): QUICK = 3 EACH = 10 FLIP = 50 # Path: utils.py class Separator(Enum): MONADIC = 20 DYADIC = 21 # Path: jello.py import subprocess import algorithm import arity_notation import draw import tokens import utils from colorama import Fore, init from prompt_toolkit import prompt from prompt_toolkit.completion import WordCompleter from prompt_toolkit.history import FileHistory from prompt_toolkit.shortcuts import CompleteStyle from grid import Grid from utils import Chain, Quick, Separator #!/usr/bin/env python3 def clear_screen(): subprocess.call("clear", shell=True) def run_jelly(expr: str, args: list[str]): try: command = ["jelly", "eun", expr, *args] result = subprocess.run(command, text=True, capture_output=True, check=True) output_text = result.stdout.strip() draw.cprint(output_text, Fore.GREEN, True) except subprocess.CalledProcessError as e: # Print the stderr output for more information about the error print(Fore.RED + f"Error: {e}") print(Fore.RED + "stderr:", e.stderr) completer = WordCompleter( [k for k in sorted( list(tokens.niladic.keys()) + list(tokens.monadic.keys()) + list(tokens.dyadic.keys()) + list(tokens.quick.keys()) + list(tokens.separators.keys())) if len(k) > 1]) history = FileHistory("jello_history.txt") def is_nilad_array(s: str) -> bool: return set(list(s)).issubset(list("0123456789,[]")) def to_jelly(token: str) -> str: if token in tokens.monadic: return tokens.monadic[token] if token in tokens.dyadic: return tokens.dyadic[token] if token in tokens.niladic: return tokens.niladic[token] if token in tokens.quick: return tokens.quick[token] if token in tokens.separators: return tokens.separators[token] if is_nilad_array(token): return token raise Exception(f"{token} is not a valid Jello keyword.") def convert(expr: list[str]) -> str: return "".join([to_jelly(t) for t in expr]) def keyword_arity(k: str) -> int: if k in tokens.niladic: return 0 if k in tokens.monadic: return 1 if k in tokens.dyadic: return 2 if k == "each": return Quick.EACH if k == "c": return Quick.FLIP if k in tokens.quick: return Quick.QUICK
if k == ".": return Separator.MONADIC
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: mMrBun/Chat2BI # Path: llms/chatglm3/conversation.py def preprocess_text( system: str | None, tools: list[dict] | None, history: list[Conversation], ) -> str: if tools: tools = json.dumps(tools, indent=4, ensure_ascii=False) prompt = f"{Role.SYSTEM}\n" prompt += system if not tools else TOOL_PROMPT if tools: tools = json.loads(tools) prompt += json.dumps(tools, ensure_ascii=False) for conversation in history: prompt += f'{conversation}' prompt += f'{Role.ASSISTANT}\n' return prompt # Path: llms/chatglm3/conversation.py class Conversation: role: Role content: str tool: str | None = None image: Image | None = None def __str__(self) -> str: print(self.role, self.content, self.tool) match self.role: case Role.SYSTEM | Role.USER | Role.ASSISTANT | Role.OBSERVATION: return f'{self.role}\n{self.content}' case Role.TOOL: return f'{self.role}{self.tool}\n{self.content}' case Role.INTERPRETER: return f'{self.role}interpreter\n{self.content}' # Human readable format def get_text(self) -> str: text = postprocess_text(self.content) match self.role.value: case Role.TOOL.value: text = f'Calling tool `{self.tool}`:\n{text}' case Role.INTERPRETER.value: text = f'{text}' case Role.OBSERVATION.value: text = f'Observation:\n```\n{text}\n```' return text # Path: llms/chatglm3/conversation.py class Role(Enum): SYSTEM = auto() USER = auto() ASSISTANT = auto() TOOL = auto() INTERPRETER = auto() OBSERVATION = auto() def __str__(self): match self: case Role.SYSTEM: return "<|system|>" case Role.USER: return "<|user|>" case Role.ASSISTANT | Role.TOOL | Role.INTERPRETER: return "<|assistant|>" case Role.OBSERVATION: return "<|observation|>" # Path: core/build_tools/utils.py def extract_code(text: str): try: pattern = r'```([^\n]*)\n(.*?)```' matches = re.findall(pattern, text, re.DOTALL) return matches[-1][1] except Exception as e: return None # Path: llms/chatglm3/code_interpreter.py from llms.chatglm3.conversation import preprocess_text, Conversation, Role from core.build_tools.utils import extract_code SYSTEM_PROMPT = ('你是一位智能AI助手,你叫ChatGLM,你连接着一台电脑,但请注意不能联网。在使用Python' '解决任务时,你可以运行代码并得到结果,如果运行结果有错误,你需要尽可能对代码进行改进。你可以处理用户上传到电脑上的文件,文件默认存储路径是/mnt/data/。') MAX_LENGTH = 8192 TRUNCATE_LENGTH = 1024 def is_valid_python(code: str) -> bool: try:
code = extract_code(code)
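The ref line starts `is_valid_python` by unwrapping the fenced block; the rest of the function is not shown in this row, but a standalone check in the same spirit can finish with the stdlib's `ast.parse`:

import ast

def is_valid_python(code: str) -> bool:
    """Return True when `code` parses as Python source."""
    try:
        ast.parse(code)
        return True
    except SyntaxError:
        return False

assert is_valid_python("x = 1 + 2")
assert not is_valid_python("def broken(:")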
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: compphoto/Intrinsic # Path: intrinsic/ordinal_util.py def base_resize(img, base_size=384): """TODO DESCRIPTION params: img (TODO): TODO base_size (int) optional: TODO (default 384) returns: net_input (TODO): TODO """ h, w, _ = img.shape max_dim = max(h, w) scale = base_size / max_dim new_h, new_w = scale * h, scale * w new_h, new_w = round_32(new_h), round_32(new_w) net_input = resize(img, (new_h, new_w, 3), anti_aliasing=True) return net_input # Path: intrinsic/ordinal_util.py def equalize_predictions(img, base, full, p=0.5): """TODO DESCRIPTION params: img (TODO): TODO base (TODO): TODO full (TODO): TODO p (int) optional: TODO (default 0.5) returns: base (TODO): TODO new_full (TODO): TODO """ h, w, _ = img.shape full_shd = (1. / full.clip(1e-5)) - 1. base_shd = (1. / base.clip(1e-5)) - 1. full_alb = get_brightness(img) / full_shd.clip(1e-5) base_alb = get_brightness(img) / base_shd.clip(1e-5) rand_msk = (np.random.randn(h, w) > p).astype(np.uint8) flat_full_alb = full_alb[rand_msk == 1] flat_base_alb = base_alb[rand_msk == 1] scale, _, _, _ = np.linalg.lstsq(flat_full_alb.reshape(-1, 1), flat_base_alb, rcond=None) new_full_alb = scale * full_alb new_full_shd = get_brightness(img) / new_full_alb.clip(1e-5) new_full = 1.0 / (1.0 + new_full_shd) return base, new_full # Path: intrinsic/pipeline.py import torch import numpy as np from skimage.transform import resize from chrislib.resolution_util import optimal_resize from chrislib.general import round_32, uninvert from intrinsic.ordinal_util import base_resize, equalize_predictions def run_pipeline( models, img_arr, output_ordinal=False, resize_conf=0.0, base_size=384, maintain_size=False, linear=False, device='cuda', lstsq_p=0.0, inputs='all'): """Runs the complete pipeline for shading and albedo prediction params: models (dict): models dictionary returned by model_util.load_models() img_arr (np.array): RGB input image as numpy array between 0-1 output_ordinal (bool) optional: whether or not to output intermediate ordinal estimations (default False) resize_conf (float) optional: confidence to use for resizing (between 0-1) if None maintain original size (default None) base_size (int) optional: size of the base resolution estimation (default 384) maintain_size (bool) optional: whether or not the results match the input image size (default False) linear (bool) optional: whether or not the input image is already linear (default False) device (str) optional: string representing device to use for pipeline (default "cuda") lstsq_p (float) optional: subsampling factor for computing least-squares fit when matching the scale of base and full estimations (default 0.0) inputs (str) optional: network inputs ("full", "base", "rgb", "all") the rgb image is always included (default "all") returns: results (dict): a result dictionary with albedo, shading and potentiall ordinal estimations """ results = {} orig_h, orig_w, _ = img_arr.shape # if no confidence value set, just round original size to 32 for model input if resize_conf is None: img_arr = resize(img_arr, (round_32(orig_h), round_32(orig_w)), anti_aliasing=True) # if a the confidence is an int, just rescale image so that the large side # of the image matches the specified 
integer value elif isinstance(resize_conf, int): scale = resize_conf / max(orig_h, orig_w) img_arr = resize( img_arr, (round_32(orig_h * scale), round_32(orig_w * scale)), anti_aliasing=True) # if the confidence is a float use the optimal resize code from Miangoleh et al. elif isinstance(resize_conf, float): img_arr = optimal_resize(img_arr, conf=resize_conf) fh, fw, _ = img_arr.shape # if the image is in sRGB we do simple linearization using gamma=2.2 if not linear: lin_img = img_arr ** 2.2 else: lin_img = img_arr with torch.no_grad(): # ordinal shading estimation -------------------------- # resize image for base and full estimations and send through ordinal net base_input = base_resize(lin_img, base_size) full_input = lin_img base_input = torch.from_numpy(base_input).permute(2, 0, 1).to(device).float() full_input = torch.from_numpy(full_input).permute(2, 0, 1).to(device).float() base_out = models['ordinal_model'](base_input.unsqueeze(0)).squeeze(0) full_out = models['ordinal_model'](full_input.unsqueeze(0)).squeeze(0) # the ordinal estimations come out of the model with a channel dim base_out = base_out.permute(1, 2, 0).cpu().numpy() full_out = full_out.permute(1, 2, 0).cpu().numpy() base_out = resize(base_out, (fh, fw)) # if we are using all inputs, we scale the input estimations using the base estimate if inputs == 'all':
ord_base, ord_full = equalize_predictions(lin_img, base_out, full_out, p=lstsq_p)
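The `equalize_predictions` call completed above reduces, per the row's own definition, to fitting one scalar between two flattened albedo estimates with `np.linalg.lstsq`. A worked NumPy example of that single-scalar fit on synthetic data (not the paper's):

import numpy as np

rng = np.random.default_rng(0)
full = rng.random(10_000)                               # flattened full-res albedo
base = 0.7 * full + 0.01 * rng.standard_normal(10_000)  # base estimate ~ scaled copy

# Solve min_s ||full * s - base||^2, mirroring the lstsq call in the row.
scale, *_ = np.linalg.lstsq(full.reshape(-1, 1), base, rcond=None)
print(round(scale[0], 3))  # ~0.7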
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: davep/tinboard # Path: tinboard/messages/tags.py class ClearTags(Message): """Clear any tags being used to filter.""" # Path: tinboard/messages/tags.py class ShowAlsoTaggedWith(TagMessage): """Message to say bookmarks also of this tag should be shown.""" # Path: tinboard/messages/tags.py class ShowTaggedWith(TagMessage): """Message to say bookmarks of this tag should be shown.""" # Path: tinboard/widgets/extended_option_list.py class OptionListEx(OptionList): """The Textual `OptionList` with more navigation keys.""" BINDINGS = [ Binding("s, j", "cursor_down", show=False), Binding("w, k", "cursor_up", show=False), ] def clear_options(self) -> Self: """Workaround for https://github.com/Textualize/textual/issues/3714""" super().clear_options() self._clear_content_tracking() return self # Path: tinboard/widgets/tags.py from typing_extensions import Final, Self from textual import on from textual.binding import Binding from textual.events import Focus from textual.reactive import var from textual.widgets.option_list import Option, OptionDoesNotExist from rich.console import RenderableType from rich.emoji import Emoji from rich.table import Table from ..messages import ClearTags, ShowAlsoTaggedWith, ShowTaggedWith from .extended_option_list import OptionListEx """Defines a widget for picking tags.""" ############################################################################## # Backward compatibility. from __future__ import annotations ############################################################################## # Python imports. ############################################################################## # Textual imports. ############################################################################## # Rich imports. ############################################################################## # Local imports. ############################################################################## class Tags(OptionListEx): """A menu of tags.""" CONTEXT_HELP = """ ## Tag list keys The following keys are available in the list of tags: | Key | Description | | - | - | | <kbd>Enter</kbd> | Show bookmarks with this tag in the bookmark list. | | <kbd>+</kbd> | Add this tag to any tag filter active in the bookmark list. | """ DEFAULT_CSS = """ Tags { &:focus { border: blank; } &> .option-list--option { padding: 0 1; } } """ BINDINGS = [ Binding("enter", "select", "Show tagged", show=True), Binding("+", "also_tagged", "Show also tagged"), ] def _prompt(self, tag: str, count: int) -> RenderableType: """A prompt for the given tag. Args: tag: The tag to build a prompt for. count: The count for that tag. Returns: The prompt for the tag. """ prompt = Table.grid(expand=True) prompt.add_column(ratio=1) prompt.add_column(justify="right") prompt.add_row(tag, f"[dim i]{count}[/]") return prompt def _sorted(self, tags: list[tuple[str, int]]) -> list[tuple[str, int]]: """Sort the tags. Args: tags: The tags to sort. Returns: The tags in the desired sort order. """ return tags def show(self, tags: list[tuple[str, int]]) -> Self: """Show the given list of tags. Args: tags: The tags to show in the widget. Returns: Self. 
""" self.can_focus = bool(tags) highlighted_tag = ( self.get_option_at_index(self.highlighted).id if self.highlighted is not None else None ) try: return self.clear_options().add_options( [ Option(self._prompt(tag, count), id=tag) for tag, count in self._sorted(tags) ] ) finally: if tags: try: self.highlighted = self.get_option_index(highlighted_tag or "") except OptionDoesNotExist: self.highlighted = 0 def _on_focus(self, _: Focus) -> None: """Highlight the first item on focus, if none highlighted.""" if self.option_count and self.highlighted is None: self.highlighted = 0 @on(OptionListEx.OptionSelected) def _show_tagged(self, event: OptionListEx.OptionSelected) -> None: """Request that bookmarks of a given tag are shown. Args: event: The event to handle. """ if event.option.id is not None:
self.post_message(ShowTaggedWith(event.option.id))
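The ref line posts a custom Textual message up the DOM. A minimal sketch of that pattern, assuming a recent Textual release; the handler name shown is the conventional snake_case derivation:

from textual.message import Message

class ShowTaggedWith(Message):
    """Carries the selected tag up to whichever ancestor handles it."""

    def __init__(self, tag: str) -> None:
        self.tag = tag
        super().__init__()

# Inside a widget handler, exactly as the row does:
#     if event.option.id is not None:
#         self.post_message(ShowTaggedWith(event.option.id))
# An ancestor can then define `def on_show_tagged_with(self, message): ...`
# (or use the @on(ShowTaggedWith) decorator) to react.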
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: buptlihang/CVLM # Path: model/utils.py IMAGE_TOKEN_INDEX = -200 # Path: model/utils.py DEFAULT_IMAGE_TOKEN = "<image>" # Path: model/utils.py DEFAULT_IM_START_TOKEN = "<im_start>" # Path: model/utils.py DEFAULT_IM_END_TOKEN = "<im_end>" # Path: model/utils.py def build_conversation(): conversation = Conversation( system= "A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.", roles=["USER", "ASSISTANT"], version="TWO", messages=[], offset=0, sep_style=SeparatorStyle.TWO, sep=" ", sep2="</s>", ) return conversation # Path: model/utils.py def load_pretrained_model(model_path, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"): kwargs = {"device_map": device_map} if load_8bit: kwargs['load_in_8bit'] = True elif load_4bit: kwargs['load_in_4bit'] = True kwargs['quantization_config'] = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4') else: kwargs['torch_dtype'] = torch.float16 tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) model, output_loading_info = AutoModelForCausalLM.from_pretrained( model_path, output_loading_info=True, **kwargs) model.resize_token_embeddings(len(tokenizer)) image_processor = model.model.image_processor if hasattr(model.config, "max_sequence_length"): context_len = model.config.max_sequence_length else: context_len = 2048 return tokenizer, model, image_processor, context_len # Path: model/utils.py def disable_torch_init(): """ Disable the redundant torch default initialization to accelerate model creation. 
""" import torch setattr(torch.nn.Linear, "reset_parameters", lambda self: None) setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None) # Path: model/utils.py def get_model_name_from_path(model_path): model_path = model_path.strip("/") model_paths = model_path.split("/") if model_paths[-1].startswith('checkpoint-'): return model_paths[-2] + "_" + model_paths[-1] else: return model_paths[-1] # Path: model/utils.py def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None): prompt_chunks = [ tokenizer(chunk).input_ids for chunk in prompt.split('<image>') ] def insert_separator(X, sep): return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1] input_ids = [] offset = 0 if len(prompt_chunks) > 0 and len( prompt_chunks[0] ) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: offset = 1 input_ids.append(prompt_chunks[0][0]) for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): input_ids.extend(x[offset:]) if return_tensors is not None: if return_tensors == 'pt': return torch.tensor(input_ids, dtype=torch.long) raise ValueError(f'Unsupported tensor type: {return_tensors}') return input_ids # Path: model/utils.py def process_images(images, image_processor, model_cfg): new_images = [] for image in images: image = expand2square( image, tuple(int(x * 255) for x in image_processor.image_mean)) image = image_processor.preprocess( image, return_tensors='pt')['pixel_values'][0] new_images.append(image) if all(x.shape == new_images[0].shape for x in new_images): new_images = torch.stack(new_images, dim=0) return new_images # Path: evaluation/MME/evaluate.py import argparse import torch import os import json import math from tqdm import tqdm from model.utils import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from model.utils import build_conversation, load_pretrained_model, disable_torch_init, get_model_name_from_path from model.utils import tokenizer_image_token, process_images from torch.utils.data import Dataset, DataLoader from PIL import Image from collections import defaultdict def split_list(lst, n): """Split a list into n (roughly) equal-sized chunks""" chunk_size = math.ceil(len(lst) / n) # integer division return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)] def get_chunk(lst, n, k): chunks = split_list(lst, n) return chunks[k] def get_gt(data_path): GT = {} for category in os.listdir(data_path): category_dir = os.path.join(data_path, category) if not os.path.isdir(category_dir): continue if os.path.exists(os.path.join(category_dir, 'images')): image_path = os.path.join(category_dir, 'images') qa_path = os.path.join(category_dir, 'questions_answers_YN') else: image_path = qa_path = category_dir assert os.path.isdir(image_path), image_path assert os.path.isdir(qa_path), qa_path for file in os.listdir(qa_path): if not file.endswith('.txt'): continue for line in open(os.path.join(qa_path, file)): question, answer = line.strip().split('\t') GT[(category, file, question)] = answer return GT # Custom dataset class class CustomDataset(Dataset): def __init__(self, questions, image_folder, tokenizer, image_processor, model_config): self.questions = questions self.image_folder = image_folder self.tokenizer = tokenizer self.image_processor = image_processor self.model_config = model_config def __getitem__(self, index): line = self.questions[index] image_file = line["image"] qs = line["text"]
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
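The ref line prepends the image placeholder to the question, the standard LLaVA-style prompt construction. A stdlib sketch using the row's own token constants; the `<im_start>`/`<im_end>` wrapped variant is a common alternative, included here as an assumption:

DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"

def with_image_token(question: str, use_im_start_end: bool = False) -> str:
    token = (DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
             if use_im_start_end else DEFAULT_IMAGE_TOKEN)
    return token + "\n" + question

print(with_image_token("How many birds are in the picture?"))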
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: vvvm23/TchAIkovsky # Path: data/tokenizer.py def get_pretrained_tokenizer(path: str = "tokenizer.json"): return miditok.REMI.from_pretrained(path) # Path: model/model.py class TchAIkovskyModel(eqx.Module): id_embeddings: eqx.Module pos_embeddings: eqx.Module decoder: eqx.Module norm_out: eqx.Module out_head: eqx.Module dtype: jnp.dtype = eqx.field(static=True) output_dtype: jnp.dtype = eqx.field(static=True) def __init__( self, dim: int, num_heads: int, num_layers: int, vocab_size: int, max_positions: int, head_dim: Optional[int] = None, dropout: float = 0.0, key: PRNGKey = None, dtype: jnp.dtype = jnp.float32, output_dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype self.output_dtype = output_dtype id_embeddings_key, pos_embeddings_key, decoder_key, out_key = jax.random.split(key, 4) self.id_embeddings = eqx.nn.Embedding(vocab_size, dim, key=id_embeddings_key) self.pos_embeddings = eqx.nn.Embedding(max_positions, dim, key=pos_embeddings_key) self.decoder = Decoder( decoder_key, dim, num_heads, num_layers, head_dim=head_dim, dropout=dropout, dtype=dtype, ) self.norm_out = eqx.nn.LayerNorm(dim) self.out_head = eqx.nn.Linear(dim, vocab_size, use_bias=True, key=out_key) def __call__(self, input_ids, position_ids, mask, key=None): causal_mask = make_causal_mask(input_ids)[0] mask = jnp.where(mask, causal_mask, 0) x = jax.vmap(self.id_embeddings)(input_ids) + jax.vmap(self.pos_embeddings)(position_ids) x = self.decoder(x, mask, key) x = jax.vmap(self.norm_out)(x) logits = jax.vmap(self.out_head)(x) logits = logits.astype(self.output_dtype) return logits # Path: utils.py def seed_others(seed): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) # Path: generate.py import json import equinox as eqx import jax import jax.numpy as jnp import numpy as np import orbax.checkpoint as ocp import tqdm from argparse import ArgumentParser from datetime import datetime from pathlib import Path from types import SimpleNamespace from typing import Optional from loguru import logger from miditoolkit import MidiFile from data.tokenizer import get_pretrained_tokenizer from model import TchAIkovskyModel from utils import seed_others def load_config(config_path): with open(config_path, mode="r") as f: data = f.read() json_dict = json.loads(data) return SimpleNamespace(**json_dict) @eqx.filter_jit @eqx.debug.assert_max_traces(max_traces=1) def generate_step(model, inputs, length, key, temperature): logits = model(**inputs) logits = jnp.take(logits, length - 1, axis=0) if temperature == 0.0: # argmax sampling return jnp.argmax(logits, axis=-1) logits = logits / temperature return jax.random.categorical(key, logits, axis=-1) def generate_loop( model, initial_input, temperature, key, max_to_generate: Optional[int] = None, model_max_positions: int = 1024, output_generated_only: bool = False, ) -> np.array: sample_idx = initial_input.shape[0] if output_generated_only: output = [] else: output = initial_input.tolist() if max_to_generate is None: DEFAULT_MAX = 1000 max_to_generate = DEFAULT_MAX input_length = sample_idx + max_to_generate if input_length > model_max_positions - 1: input_length = model_max_positions - 1 position_ids = np.arange(input_length) mask = np.concatenate( [ 
np.ones((sample_idx,), dtype=bool), np.zeros((input_length - sample_idx,), dtype=bool), ], axis=-1, dtype=bool, ) input_ids = np.pad(initial_input, ((0, input_length - sample_idx),)) # TODO: maybe replace with jax.lax.scan loop for faster generation for _ in tqdm.trange(max_to_generate): key, subkey = jax.random.split(key) inputs = dict(input_ids=input_ids, position_ids=position_ids, mask=mask) token = generate_step(model, inputs, np.array(sample_idx), subkey, temperature).item() output.append(token) if sample_idx < input_length: input_ids[sample_idx] = token mask[sample_idx] = True else: input_ids = np.concatenate([input_ids[1:], np.array([token])], axis=-1) sample_idx = min(input_length - 1, sample_idx + 1) return np.array(output) # tokenizes initial prompt def tokenize_prompt(midi, tokenizer): return tokenizer(midi) # loads prompt MIDI file def file_prompt(path): midi = MidiFile(path) return midi def main(args): logger.info("Beginning generation script.") key = jax.random.PRNGKey(args.seed) logger.info(f"Using PRNG key {args.seed}") seed_others(args.seed) logger.info("Loading config.") config = load_config(args.config) logger.info(f"Loading tokenizer from '{args.tokenizer}'")
tokenizer = get_pretrained_tokenizer(args.tokenizer)
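Beyond the tokenizer load completed above, the row's `generate_loop` maintains a fixed-length buffer: pad the prompt, fill slots until full, then slide the window. A NumPy-only sketch of that buffer logic with a toy stand-in for the JAX model; the indexing is simplified relative to the row:

import numpy as np

def toy_model(input_ids, sample_idx):
    return (input_ids[sample_idx - 1] + 1) % 100  # stand-in: "next token" = last + 1

max_positions = 8
prompt = np.array([1, 2, 3])
sample_idx = len(prompt)
input_ids = np.pad(prompt, (0, max_positions - sample_idx))
mask = np.arange(max_positions) < sample_idx  # the real loop feeds this to the model

out = prompt.tolist()
for _ in range(10):
    token = toy_model(input_ids, sample_idx)
    out.append(int(token))
    if sample_idx < max_positions:            # still filling the padded buffer
        input_ids[sample_idx] = token
        mask[sample_idx] = True
    else:                                      # buffer full: slide the window
        input_ids = np.concatenate([input_ids[1:], [token]])
    sample_idx = min(max_positions, sample_idx + 1)
print(out)  # [1, 2, 3, 4, 5, ...]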
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: dazhangyu123/ACMIL # Path: architecture/network.py class Classifier_1fc(nn.Module): def __init__(self, n_channels, n_classes, droprate=0.0): super(Classifier_1fc, self).__init__() self.fc = nn.Linear(n_channels, n_classes) self.droprate = droprate if self.droprate != 0.0: self.dropout = torch.nn.Dropout(p=self.droprate) def forward(self, x): if self.droprate != 0.0: x = self.dropout(x) x = self.fc(x) return x # Path: architecture/network.py class DimReduction(nn.Module): def __init__(self, n_channels, m_dim=512, numLayer_Res=0): super(DimReduction, self).__init__() self.fc1 = nn.Linear(n_channels, m_dim, bias=False) self.relu1 = nn.ReLU(inplace=True) self.numRes = numLayer_Res self.resBlocks = [] for ii in range(numLayer_Res): self.resBlocks.append(residual_block(m_dim)) self.resBlocks = nn.Sequential(*self.resBlocks) def forward(self, x): x = self.fc1(x) x = self.relu1(x) if self.numRes > 0: x = self.resBlocks(x) return x # Path: architecture/ibmil.py import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from architecture.network import Classifier_1fc, DimReduction class Attention_Gated(nn.Module): def __init__(self, L=512, D=128, K=1): super(Attention_Gated, self).__init__() self.L = L self.D = D self.K = K self.attention_V = nn.Sequential( nn.Linear(self.L, self.D), nn.Tanh() ) self.attention_U = nn.Sequential( nn.Linear(self.L, self.D), nn.Sigmoid() ) self.attention_weights = nn.Linear(self.D, self.K) def forward(self, x): ## x: N x L A_V = self.attention_V(x) # NxD A_U = self.attention_U(x) # NxD A = self.attention_weights(A_V * A_U) # NxK A = torch.transpose(A, 1, 0) # KxN return A ### K x N class IBMIL(nn.Module): def __init__(self, conf, confounder_dim=128, confounder_merge='cat'): super(IBMIL, self).__init__() self.confounder_merge = confounder_merge assert confounder_merge in ['cat', 'add', 'sub'] self.dimreduction = DimReduction(conf.D_feat, conf.D_inner) self.attention = Attention_Gated(conf.D_inner, 128, 1)
self.classifier = Classifier_1fc(conf.D_inner, conf.n_class, 0)
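The ref line wires the classifier onto the reduced features; the more distinctive block in this row is `Attention_Gated`. A self-contained PyTorch sketch of that gated attention pooling (Ilse et al., 2018) with softmax-normalised weights:

import torch
import torch.nn as nn

class GatedAttentionPool(nn.Module):
    def __init__(self, L: int = 512, D: int = 128):
        super().__init__()
        self.V = nn.Sequential(nn.Linear(L, D), nn.Tanh())
        self.U = nn.Sequential(nn.Linear(L, D), nn.Sigmoid())
        self.w = nn.Linear(D, 1)

    def forward(self, x):                        # x: N x L instance features
        scores = self.w(self.V(x) * self.U(x))   # N x 1 raw attention scores
        attn = torch.softmax(scores.t(), dim=1)  # 1 x N, sums to one over instances
        return attn @ x                          # 1 x L bag embedding

bag = torch.randn(25, 512)                       # e.g. 25 patch features from one slide
print(GatedAttentionPool()(bag).shape)           # torch.Size([1, 512])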
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Kav-K/Described # Path: services/environment_service.py class EnvService: # To be expanded upon later! def __init__(self): self.env = {} @staticmethod def environment_path_with_fallback(env_name, relative_fallback=None): directory = os.getenv(env_name) if directory is not None: return Path(directory).resolve() if relative_fallback: app_relative = (app_root_path() / relative_fallback).resolve() if app_relative.exists(): return app_relative return Path.cwd() @staticmethod def save_path(): share_dir = os.getenv("SHARE_DIR") if share_dir is not None: return Path(share_dir) return app_root_path() @staticmethod def find_shared_file(file_name): share_file_paths = [] share_dir = os.getenv("SHARE_DIR") if share_dir is not None: share_file_paths.append(Path(share_dir) / file_name) share_file_paths.extend( [ app_root_path() / "share" / file_name, app_root_path() / file_name, Path(file_name), ] ) for share_file_path in share_file_paths: if share_file_path.exists(): return share_file_path.resolve() raise ValueError(f"Unable to find shared data file {file_name}") @staticmethod def get_allowed_guilds(): # ALLOWED_GUILDS is a comma separated list of guild ids # It can also just be one guild ID # Read these allowed guilds and return as a list of ints try: allowed_guilds = os.getenv("ALLOWED_GUILDS") except Exception: allowed_guilds = None if allowed_guilds is None: raise ValueError( "ALLOWED_GUILDS is not defined properly in the environment file!" "Please copy your server's guild ID and put it into ALLOWED_GUILDS in the .env file." 'For example a line should look like: `ALLOWED_GUILDS="971268468148166697"`' ) allowed_guilds = ( allowed_guilds.split(",") if "," in allowed_guilds else [allowed_guilds] ) allowed_guilds = [int(guild) for guild in allowed_guilds] return allowed_guilds @staticmethod def get_described_channels(): # ALLOWED_GUILDS is a comma separated list of guild ids # It can also just be one guild ID # Read these allowed guilds and return as a list of ints try: described_channels = os.getenv("DESCRIBED_CHANNELS") except Exception: described_channels = None if described_channels is None: raise ValueError( "DESCRIBED_CHANNELS is not properly defined in your environment file. All channels will be enabled for image descriptions" ) described_channels = ( described_channels.split(",") if "," in described_channels else [described_channels] ) return described_channels @staticmethod def get_discord_token(): try: e2b_key = os.getenv("DISCORD_TOKEN") return e2b_key except Exception: return None @staticmethod def get_openai_api_key(): try: openai_key = os.getenv("OPENAI_API_KEY") return openai_key except Exception: return None @staticmethod def get_admin_roles(): # ADMIN_ROLES is a comma separated list of string roles # It can also just be one role # Read these allowed roles and return as a list of strings try: admin_roles = os.getenv("ADMIN_ROLES") except Exception: admin_roles = None if admin_roles is None: print( "ADMIN_ROLES is not defined properly in the environment file!" "Please copy your server's role and put it into ADMIN_ROLES in the .env file." 
'For example a line should look like: `ADMIN_ROLES="Admin"`' ) print("Defaulting to allowing all users to use admin commands...") return [None] admin_roles = ( admin_roles.lower().split(",") if "," in admin_roles else [admin_roles.lower()] ) return admin_roles # Path: services/prompts/image_analysis_prompt.py IMAGE_ANALYSIS_PROMPT = """ You are an image describer. You will be given one or more images and your goal is to describe all of the details in incredible, verbose detail. Pretend as if you are describing an image for a user that is visually impaired, thinking what information would most be useful for them to understand the image holistically. Make note to describe and talk about: - The colors of the image - The shapes of the objects in the image - The objects themselves in the image and what they are - Actions happening in the image - The scenery and landscape of the image - The emotions of the people in the image - The weather of the image - The time of day of the image - The set and setting of the image holistically. - Always perform OCR and extract all the text from the image when possible. Always respond in third person, talk about the image provided in third person and describe it as if you are describing it to someone who is visually impaired. Be incredibly, very brief and concise while still conveying all the information possible. Now, describe an image. They will be given to you: """ # Path: services/openai_service.py import traceback import aiohttp import backoff from services.environment_service import EnvService from services.prompts.image_analysis_prompt import IMAGE_ANALYSIS_PROMPT def backoff_handler_request(details): print( f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | " f"{details['exception'].args[0]}" ) class OpenAIExecutor: def __init__(self): self.openai_api_key = EnvService.get_openai_api_key() try:
self.ANALYSIS_PRETEXT = IMAGE_ANALYSIS_PROMPT
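This row's `backoff_handler_request` formats the same `details` dict that the `backoff` library passes to its hooks. A small sketch of wiring such a handler to a retried function, assuming the `backoff` package; the exception type and retry counts are illustrative:

import backoff

def backoff_handler(details):
    print(f"Backing off {details['wait']:0.1f} seconds after "
          f"{details['tries']} tries calling {details['target'].__name__}")

@backoff.on_exception(backoff.expo, ConnectionError,
                      max_tries=4, on_backoff=backoff_handler)
def flaky_call():
    raise ConnectionError("simulated transient failure")

try:
    flaky_call()
except ConnectionError:
    print("gave up after the final retry")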
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: juftin/hatch-pip-compile # Path: hatch_pip_compile/exceptions.py class HatchPipCompileError(Exception): """ Base exception for hatch-pip-compile """ # Path: hatch_pip_compile/installer.py class PluginInstaller(ABC): """ Package Installer for the plugin This abstract base class is used to define the interface for how the plugin should install packages and dependencies. """ environment: "PipCompileEnvironment" @abstractmethod def install_dependencies(self) -> None: """ Install the dependencies """ def sync_dependencies(self) -> None: """ Sync the dependencies - same as `install_dependencies` """ self.install_dependencies() def install_project(self) -> None: """ Install the project (`--no-deps`) """ with self.environment.safe_activation(): self.environment.plugin_check_command( self.environment.construct_pip_install_command( args=["--no-deps", str(self.environment.root)] ) ) def install_project_dev_mode(self) -> None: """ Install the project in editable mode (`--no-deps`) """ with self.environment.safe_activation(): self.environment.plugin_check_command( self.environment.construct_pip_install_command( args=["--no-deps", "--editable", str(self.environment.root)] ) ) # Path: tests/conftest.py class PipCompileFixture: """ Testing Fixture Data Container """ __test__ = False isolation: pathlib.Path toml_doc: tomlkit.TOMLDocument pyproject: pathlib.Path project: Project platform: Platform isolated_data_dir: pathlib.Path application: Application = field(init=False) default_environment: PipCompileEnvironment = field(init=False) test_environment: PipCompileEnvironment = field(init=False) def __post_init__(self) -> None: """ Post Init """ self.application = Application( exit_func=lambda x: None, # noqa: ARG005 verbosity=0, interactive=False, enable_color=False, ) self.application.data_dir = self.isolated_data_dir self.application.project = self.project self.default_environment = self.reload_environment("default") self.test_environment = self.reload_environment("test") def reload_environment(self, environment: str) -> PipCompileEnvironment: """ Reload a new environment given the current state of the isolated project """ new_project = Project(self.isolation) return PipCompileEnvironment( root=self.isolation, metadata=new_project.metadata, name=environment, config=new_project.config.envs[environment], matrix_variables={}, data_directory=self.isolated_data_dir, isolated_data_directory=self.isolated_data_dir, platform=self.platform, verbosity=0, ) def update_pyproject(self) -> None: """ Update pyproject.toml """ tomlkit.dump(self.toml_doc, self.pyproject.open("w")) # Path: tests/test_installer.py from typing import Dict, Type from unittest.mock import Mock from hatch_pip_compile.exceptions import HatchPipCompileError from hatch_pip_compile.installer import PluginInstaller from tests.conftest import PipCompileFixture import pytest """ Installation Tests """ def test_pip_install_dependencies(mock_check_command: Mock, pip_compile: PipCompileFixture) -> None: """ Assert the `pip` installation command is called with the expected arguments """ pip_compile.default_environment.create() pip_compile.default_environment.installer.install_dependencies() expected_call = [ "python", "-u", "-m", 
"pip", "install", "--disable-pip-version-check", "--no-python-version-warning", "-q", "--requirement", ] call_args = list(mock_check_command.call_args)[0][0][:-1] assert call_args == expected_call @pytest.mark.parametrize("installer", ["pip", "pip-sync"]) def test_installer_type( installer: str, installer_dict: Dict[str, Type[PluginInstaller]], pip_compile: PipCompileFixture ) -> None: """ Test the `pip-compile-installer` configuration option """ pip_compile.toml_doc["tool"]["hatch"]["envs"]["default"]["pip-compile-installer"] = installer pip_compile.update_pyproject() updated_environment = pip_compile.reload_environment("default") assert isinstance(updated_environment.installer, installer_dict[installer]) def test_installer_unknown(pip_compile: PipCompileFixture) -> None: """ Test that an exception is raised when an unknown installer is configured """ pip_compile.toml_doc["tool"]["hatch"]["envs"]["default"]["pip-compile-installer"] = "unknown" pip_compile.update_pyproject()
with pytest.raises(HatchPipCompileError):
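The ref line asserts the failure path with `pytest.raises`. A self-contained version of that assertion style; the exception and loader here are local stand-ins for the plugin's real ones:

import pytest

class HatchPipCompileError(Exception):
    """Stand-in for the plugin's base exception."""

def load_installer(name: str) -> str:
    if name not in {"pip", "pip-sync"}:
        raise HatchPipCompileError(f"unknown installer: {name}")
    return name

def test_unknown_installer_raises():
    with pytest.raises(HatchPipCompileError):
        load_installer("unknown")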
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: google-deepmind/pix2act # Path: pix2act/common/tf_utils.py def add_bytes_feature( example: tf.train.Example, key: str, value: bytes ) -> None: def add_text_feature(example: tf.train.Example, key: str, value: str) -> None: def get_bytes_feature(example: tf.train.Example, key: str) -> bytes: def get_text_feature(example: tf.train.Example, key: str) -> str: def _get_hash(key: str) -> int: def _increment_counter(item, counter): def __init__( self, output_dir: str, validation_percent: Optional[int] = 10, train_file_name: str = "train.tfr", val_file_name: str = "val.tfr", ): def _partition_index( self, example: tf.train.Example, unused_num_partitions: int ) -> int: def expand(self, pcoll): class SplitAndWriteTFRecords(beam.PTransform): # Path: pix2act/tasks/miniwob/search/reward_utils.py STEP_PENALTY = -1.0 / 30 VALUE_FN_SCALAR = 30 _NORMALIZED_REWARD_THRESHOLD = 0.9 def compute_surrogate_reward(raw_reward, steps_to_go=0): def surrogate_reward_to_value_fn_target(surrogate_reward): def value_fn_output_to_surrogate_reward(value_fn_output): # Path: pix2act/tasks/miniwob/search/write_value_fn_tf_examples.py from absl import app from absl import flags from pix2act.common import tf_utils from pix2act.tasks.miniwob import episode_pb2 from pix2act.tasks.miniwob.search import reward_utils import apache_beam as beam import tensorflow as tf # Copyright 2023 The pix2act Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Converts episodes to tf examples for training value function approximator. """ FLAGS = flags.FLAGS flags.DEFINE_list("inputs", "", "Input tfrecord files of Episodes.") flags.DEFINE_string("output_dir", "", "Output location for tf examples.") flags.DEFINE_float( "reward_threshold", 0.8, "Demonstrations below this threshold will be discarded.", ) class ConvertEpisode(beam.DoFn): """Convert episode to tf examples.""" def process(self, episode): if not episode.task_name: beam.metrics.Metrics.counter("ConvertEpisode", "no_task_name").inc() elif not episode.steps: beam.metrics.Metrics.counter("no_steps", episode.task_name).inc() elif episode.raw_reward < FLAGS.reward_threshold: beam.metrics.Metrics.counter( "failed_demonstration", episode.task_name ).inc() else: beam.metrics.Metrics.counter("num_demos", episode.task_name).inc() try: total_steps = len(episode.steps) for step_idx, step in enumerate(episode.steps): steps_to_go = total_steps - step_idx surrogate_reward = reward_utils.compute_surrogate_reward( episode.raw_reward, steps_to_go ) value_fn_target = reward_utils.surrogate_reward_to_value_fn_target( surrogate_reward ) example = tf.train.Example()
tf_utils.add_bytes_feature(example, "image", step.screenshot_png)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: zhang-tao-whu/DVIS_Plus # Path: mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py def build_transformer_decoder(cfg, in_channels, mask_classification=True): """ Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`. """ name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification) # Path: mask2former/modeling/pixel_decoder/fpn.py def build_pixel_decoder(cfg, input_shape): """ Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`. """ name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) forward_features = getattr(model, "forward_features", None) if not callable(forward_features): raise ValueError( "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. " f"Please implement forward_features for {name} to only return mask features." ) return model # Path: mask2former/modeling/meta_arch/mask_former_head.py import logging import fvcore.nn.weight_init as weight_init from copy import deepcopy from typing import Callable, Dict, List, Optional, Tuple, Union from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, get_norm from detectron2.modeling import SEM_SEG_HEADS_REGISTRY from ..transformer_decoder.maskformer_transformer_decoder import build_transformer_decoder from ..pixel_decoder.fpn import build_pixel_decoder # Copyright (c) Facebook, Inc. and its affiliates. @SEM_SEG_HEADS_REGISTRY.register() class MaskFormerHead(nn.Module): _version = 2 def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): version = local_metadata.get("version", None) if version is None or version < 2: # Do not warn if train from scratch scratch = True logger = logging.getLogger(__name__) for k in list(state_dict.keys()): newk = k # if "sem_seg_head" in k and not k.startswith(prefix + "predictor"): # newk = k.replace(prefix, prefix + "pixel_decoder.") # # logger.debug(f"{k} ==> {newk}") if newk != k: state_dict[newk] = state_dict[k] del state_dict[k] scratch = False if not scratch: logger.warning( f"Weight format of {self.__class__.__name__} have changed! " "Please upgrade your models. Applying automatic conversion now ..." ) @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, num_classes: int, pixel_decoder: nn.Module, loss_weight: float = 1.0, ignore_value: int = -1, return_transformer_feature: bool = False, # extra parameters transformer_predictor: nn.Module, transformer_in_feature: str, ): """ NOTE: this interface is experimental. Args: input_shape: shapes (channels and stride) of the input features num_classes: number of classes to predict pixel_decoder: the pixel decoder module loss_weight: loss weight ignore_value: category id to be ignored during training. 
transformer_predictor: the transformer decoder that makes prediction transformer_in_feature: input feature name to the transformer_predictor """ super().__init__() input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] feature_strides = [v.stride for k, v in input_shape] feature_channels = [v.channels for k, v in input_shape] self.ignore_value = ignore_value self.common_stride = 4 self.loss_weight = loss_weight self.return_transformer_feature = return_transformer_feature self.pixel_decoder = pixel_decoder self.predictor = transformer_predictor self.transformer_in_feature = transformer_in_feature self.num_classes = num_classes @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): # figure out in_channels to transformer predictor if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder": transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding": transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2 transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM else: transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels return { "input_shape": { k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES }, "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, "return_transformer_feature": cfg.MODEL.SEM_SEG_HEAD.RETURN_TRANSFORMER_FEATURE, "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, "pixel_decoder": build_pixel_decoder(cfg, input_shape), "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, "transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
"transformer_predictor": build_transformer_decoder(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: teamreboott/data-modori # Path: data_modori/utils/constant.py class Fields(object): stats = DEFAULT_PREFIX + 'stats__' meta = DEFAULT_PREFIX + 'meta__' context = DEFAULT_PREFIX + 'context__' suffix = DEFAULT_PREFIX + 'suffix__' # Path: data_modori/utils/constant.py class StatsKeys(object): alpha_token_ratio = 'alpha_token_ratio' korean_token_ratio = 'korean_token_ratio' alnum_ratio = 'alnum_ratio' avg_line_length = 'avg_line_length' char_rep_ratio = 'char_rep_ratio' flagged_words_ratio = 'flagged_words_ratio' lang = 'lang' lang_score = 'lang_score' max_line_length = 'max_line_length' perplexity = 'perplexity' special_char_ratio = 'special_char_ratio' stopwords_ratio = 'stopwords_ratio' text_len = 'text_len' num_token = 'num_token' num_words = 'num_words' word_rep_ratio = 'word_rep_ratio' # Path: data_modori/utils/model_utils.py def prepare_model(lang='en', model_type='sentencepiece', model_key=None): """ Prepare and load a model or a tokenizer from MODEL_ZOO. :param lang: which lang model to load :param model_type: model or tokenizer type :param model_key: tokenizer name, only used when prepare HuggingFace tokenizer :return: a model or tokenizer instance """ type_to_name = { 'fasttext': ('lid.176.bin', prepare_fasttext_model), 'sentencepiece': ('%s.sp.model', prepare_sentencepiece_model), 'kenlm': ('%s.arpa.bin', prepare_kenlm_model), 'nltk': ('punkt.%s.pickle', prepare_nltk_model), 'huggingface': ('%s', prepare_huggingface_tokenizer), 'spacy': ('%s_core_web_md-3.5.0', prepare_diversity_model), 'spacy_ko': ('%s_core_news_md-3.7.0', prepare_diversity_model), 'konlpy': ('%s', prepare_konlpy_model), } assert model_type in type_to_name.keys( ), 'model_type must be one of the following: {}'.format( list(type_to_name.keys())) if model_key is None: model_key = model_type + '_' + lang if model_key not in MODEL_ZOO.keys(): model_name, model_func = type_to_name[model_type] if model_type == 'fasttext': MODEL_ZOO[model_key] = model_func(model_name) elif model_type == 'huggingface': MODEL_ZOO[model_key] = model_func(model_key) elif model_type == 'konlpy': MODEL_ZOO[model_key] = model_func() else: MODEL_ZOO[model_key] = model_func(model_name, lang) return model_key # Path: data_modori/utils/model_utils.py def get_model(model_key, lang='en', model_type='sentencepiece'): """ Get a model or a tokenizer from MODEL_ZOO. :param model_key: name of the model or tokenzier """ if model_key not in MODEL_ZOO: prepare_model(lang=lang, model_type=model_type, model_key=model_key) return MODEL_ZOO.get(model_key, None) # Path: data_modori/ops/base_op.py OPERATORS = Registry('Operators') # Path: data_modori/ops/base_op.py class Filter: def __init__(self, text_key: str = None): """ Base class that removes specific info. :param text_key: the key name of field that stores sample texts to be processed """ if text_key is None: text_key = 'text' self.text_key = text_key from data_modori.core.data import wrap_func_with_nested_access self.process = wrap_func_with_nested_access(self.process) self.compute_stats = wrap_func_with_nested_access(self.compute_stats) def compute_stats(self, sample, context=False): """ Compute stats for the sample which is used as a metric to decide whether to filter this sample. 
:param sample: input sample. :param context: whether to store context information of intermediate vars in the sample temporarily. :return: sample with computed stats """ raise NotImplementedError def process(self, sample): """ For sample level, sample --> Boolean. :param sample: sample to decide whether to filter :return: true for keeping and false for filtering """ raise NotImplementedError # Path: data_modori/ops/filter/language_id_score_filter.py from jsonargparse.typing import ClosedUnitInterval from loguru import logger from data_modori.utils.constant import Fields, StatsKeys from data_modori.utils.model_utils import prepare_model, get_model from ..base_op import OPERATORS, Filter @OPERATORS.register_module('language_id_score_filter') class LanguageIDScoreFilter(Filter): """Filter to keep samples in a specific language with confidence score larger than a specific min value.""" def __init__(self, lang: str = '', min_score: ClosedUnitInterval = 0.8, *args, **kwargs): """ Initialization method. :param lang: Samples in which language to keep. :param min_score: The min language identification confidence scores of samples to keep. :param args: extra args :param kwargs: extra args """ super().__init__(*args, **kwargs) self.lang = lang self.min_score = min_score self.model_key = prepare_model(lang=lang, model_type='fasttext') def compute_stats(self, sample): # check if it's computed already if StatsKeys.lang in sample[ Fields.stats] and StatsKeys.lang_score in sample[Fields.stats]: return sample text = sample[self.text_key].lower().replace('\n', ' ')
ft_model = get_model(self.model_key, lang=self.lang, model_type='fasttext')
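The ref line fetches the cached fastText model; what follows in such filters is the usual language-ID call. A sketch assuming `lid.176.bin` has already been downloaded from fasttext.cc:

import fasttext

model = fasttext.load_model("lid.176.bin")          # path assumed
text = "안녕하세요, 만나서 반갑습니다.".replace("\n", " ")
labels, scores = model.predict(text)
lang = labels[0].replace("__label__", "")           # e.g. 'ko'
keep = lang == "ko" and scores[0] >= 0.8
print(lang, round(float(scores[0]), 3), keep)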
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: 52phm/pylmkit # Path: pylmkit/utils/data_utils.py class Document(BaseModel): page_content: str metadata: dict = Field(default_factory=dict) type: str = "Document" def __str__(self): return f"Document(page_content='{self.page_content}', metadata={self.metadata})" # Path: pylmkit/core/base.py class BaseKnowledgeBase(object): def __init__(self, init_documents=None): self.documents = [] self.splitter_documents = [] if isinstance(init_documents, list): self.documents = init_documents @classmethod def load(cls, filepath, is_return=True, return_mode="doc", extend=True): if filepath.endswith('.json'): data = read_json(filepath) elif filepath.endswith('.yaml') or filepath.endswith('yml'): data = read_yaml(filepath) # data=[{},{}] else: raise Exception(f"The file type is not supported") data_dict_as_document = dict_as_document(data) result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return, extend=extend) if is_return: return result @classmethod def add(cls, texts, metadatas=None, is_return=True, return_mode="doc", extend=True, types="Document"): data_dict_as_document = text_as_document(texts=texts, metadatas=metadatas, types=types) result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return, extend=extend) if is_return: return result def split(self, splitter=None, chunk_size=500, chunk_overlap=100, return_mode='doc', **kwargs): if splitter is None: splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs) else: splitter = splitter self.splitter_documents = splitter.split_documents(self.documents) if return_mode == 'doc': return self.splitter_documents else: return document_as_dict(self.splitter_documents) def to_csv_loader(self, filepath, index=False, **kwargs): data = document_as_dict(self.documents) pd.DataFrame(data).to_csv(filepath, index=index, **kwargs) def to_csv_splitter(self, filepath, splitter=None, chunk_size=500, chunk_overlap=100, index=False, splitter_kwargs={}, csv_kwargs={} ): if not self.splitter_documents: self.splitter_documents = self.split(splitter=splitter, chunk_size=chunk_size, chunk_overlap=chunk_overlap, **splitter_kwargs) data = document_as_dict(self.splitter_documents) pd.DataFrame(data).to_csv(filepath, index=index, **csv_kwargs) def clear(self, mode='doc'): if mode == 'doc': self.documents = [] else: self.splitter_documents = [] def _base(self, documents, is_return=True, return_mode='doc', extend=True): if extend: self.documents.extend(documents) # # dict -> Document if is_return: if return_mode == 'doc': return self.documents else: return document_as_dict(self.documents) else: # self.documents = documents # when extend is False, just reset documents if is_return: if return_mode == 'doc': return documents else: return document_as_dict(documents) # Path: pylmkit/tools/search.py from duckduckgo_search import DDGS from pylmkit.utils.data_utils import Document from pylmkit.core.base import BaseKnowledgeBase class WebSearch(DDGS, BaseKnowledgeBase): def __init__( self, topk=5, backend="api", region="wt-wt", timelimit=None, safesearch="moderate", init_documents=None, timeout=10, headers=None, proxies=None ): DDGS.__init__( 
self, timeout=timeout, headers=headers, proxies=proxies ) BaseKnowledgeBase.__init__(self, init_documents=init_documents) self.topk = int(topk) self.backend = backend self.region = region self.timelimit = timelimit self.safesearch = safesearch def get(self, keyword): if keyword: search_gen = super().text(keywords=keyword, backend=self.backend, region=self.region, max_results=self.topk, timelimit=self.timelimit, safesearch=self.safesearch ) for i, page in enumerate(list(search_gen)): if page:
self.documents.append(Document(
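The ref line wraps each hit in a `Document`; the search itself goes through `duckduckgo_search`. A minimal standalone query, hedged since this library's API shifts between releases (`max_results` matches the row's own call):

from duckduckgo_search import DDGS

with DDGS() as ddgs:
    for page in ddgs.text("retrieval augmented generation", max_results=3):
        # Results are dicts; 'title', 'href' and 'body' are the usual keys.
        print(page.get("title"), "->", page.get("href"))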
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: hadican/failedkite # Path: config.py class Config: def __init__(self): self.slack_token = self._get_env_variable('SLACK_TOKEN') self.default_slack_email = self._get_env_variable('DEFAULT_SLACK_EMAIL') self.author_mapping = self._load_author_mapping('/config/author_mapping.yml') @staticmethod def _get_env_variable(name): value = os.environ.get(name) if not value: raise ValueError(f"{name} environment variable not set") return value @staticmethod def _load_author_mapping(file_path): with open(file_path, 'r') as mapping_file: return yaml.safe_load(mapping_file) # Path: notification_service.py class NotificationService: def __init__(self, slack_client, config): self.slack_client = slack_client self.config = config self.default_user_id = self.slack_client.get_user_id_by_email(self.config.default_slack_email) if self.default_user_id is None: raise Exception(f"Failed to retrieve default Slack user ID for the email: {self.config.default_slack_email}") self.logger = logging.getLogger(self.__class__.__name__) def notify(self, build): build_web_url = build['web_url'] build_source = build.get("source") if build_source == "schedule": trigger_message = f"Not a human-triggered job, no action was taken for the build url={build_web_url}" self.logger.info(trigger_message) return trigger_message, 200 fail_statuses = ["failing", "failed"] build_status = build.get("state") if build_status not in fail_statuses: build_status_message = f"Not a build failure, no action taken for the build url={build_web_url}." return build_status_message, 200 build_creator = build.get("creator") build_author = build.get("author") if build_creator: email = build_creator.get("email") elif build_author: username = build_author.get("username") slack_email = self.config.author_mapping.get(username) if slack_email: email = slack_email else: build_author_message = f"No user was found in the author mapping with the username={username} for the failing build url={build_web_url}" self.slack_client.send_message(build_author_message, self.default_user_id) return build_author_message, 500 else: build_user_message = f"No user was found for the failing build url={build_web_url}" self.slack_client.send_message(build_user_message, self.default_user_id) return build_user_message, 500 user_id = self.slack_client.get_user_id_by_email(email) if not user_id: user_message = f"Failed to fetch user ID from Slack for email={email} for build url={build_web_url}" return user_message, 500 buildkite_message = f"Your build has `failed`. 
Here is the URL to check: {build_web_url}" status = self.slack_client.send_message(buildkite_message, user_id) if status: return buildkite_message, 200 else: return "Failed to send Slack message.", 500 # Path: slack_client.py class SlackClient: def __init__(self, token): self.client = WebClient(token=token) self.logger = logging.getLogger(self.__class__.__name__) def get_user_id_by_email(self, email): try: response = self.client.users_lookupByEmail(email=email) if response["ok"]: return response["user"]["id"] else: self.logger.error("Failed to fetch user ID for email=%s error=%s", email, response["error"]) return None except Exception as e: self.logger.error("Error fetching user ID for email=%s error=%s", email, str(e)) return None def send_message(self, message, user_id): try: response = self.client.chat_postMessage(channel=user_id, text=message) if response["ok"]: return True else: self.logger.error("Failed to send Slack message with error=%s", response["error"]) return False except Exception as e: self.logger.error("Failed to send Slack message with error=%s", str(e)) return False # Path: app.py import logging from flask import Flask, request from config import Config from notification_service import NotificationService from slack_client import SlackClient app = Flask(__name__) logging.basicConfig(level=logging.INFO) config = Config() slack_client = SlackClient(token=config.slack_token)
notification_service = NotificationService(slack_client, config)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: PufferAI/pokegym # Path: pokegym/pyboy_binding.py ACTIONS = (Down, Left, Right, Up, A, B, Start, Select) # Path: pokegym/pyboy_binding.py def make_env(gb_path, headless=True, quiet=False, **kwargs): gb_path='pokemon_red.gb' game = PyBoy( gb_path, debugging=False, window_type='headless' if headless else 'SDL2', hide_window=quiet, **kwargs, ) screen = game.botsupport_manager().screen() if not headless: game.set_emulation_speed(6) return game, screen # Path: pokegym/pyboy_binding.py def open_state_file(path): '''Load state file with BytesIO so we can cache it''' with open(path, 'rb') as f: initial_state = BytesIO(f.read()) return initial_state # Path: pokegym/pyboy_binding.py def load_pyboy_state(pyboy, state): '''Reset state stream and load it into PyBoy''' state.seek(0) pyboy.load_state(state) # Path: pokegym/pyboy_binding.py def run_action_on_emulator(pyboy, screen, action, headless=True, fast_video=True, frame_skip=24): '''Sends actions to PyBoy''' press, release = action.PRESS, action.RELEASE pyboy.send_input(press) if headless or fast_video: pyboy._rendering(False) frames = [] for i in range(frame_skip): if i == 8: # Release button after 8 frames pyboy.send_input(release) if not fast_video: # Save every frame frames.append(screen.screen_ndarray()) if i == frame_skip - 1: pyboy._rendering(True) pyboy.tick() if fast_video: # Save only the last frame frames.append(screen.screen_ndarray()) # Path: pokegym/ram_map.py HP_ADDR = [0xD16C, 0xD198, 0xD1C4, 0xD1F0, 0xD21C, 0xD248] MAX_HP_ADDR = [0xD18D, 0xD1B9, 0xD1E5, 0xD211, 0xD23D, 0xD269] PARTY_SIZE_ADDR = 0xD163 PARTY_ADDR = [0xD164, 0xD165, 0xD166, 0xD167, 0xD168, 0xD169] PARTY_LEVEL_ADDR = [0xD18C, 0xD1B8, 0xD1E4, 0xD210, 0xD23C, 0xD268] POKE_XP_ADDR = [0xD179, 0xD1A5, 0xD1D1, 0xD1FD, 0xD229, 0xD255] CAUGHT_POKE_ADDR = range(0xD2F7, 0xD309) SEEN_POKE_ADDR = range(0xD30A, 0xD31D) OPPONENT_LEVEL_ADDR = [0xD8C5, 0xD8F1, 0xD91D, 0xD949, 0xD975, 0xD9A1] X_POS_ADDR = 0xD362 Y_POS_ADDR = 0xD361 MAP_N_ADDR = 0xD35E BADGE_1_ADDR = 0xD356 OAK_PARCEL_ADDR = 0xD74E OAK_POKEDEX_ADDR = 0xD74B OPPONENT_LEVEL = 0xCFF3 ENEMY_POKE_COUNT = 0xD89C EVENT_FLAGS_START_ADDR = 0xD747 EVENT_FLAGS_END_ADDR = 0xD761 MUSEUM_TICKET_ADDR = 0xD754 MONEY_ADDR_1 = 0xD347 MONEY_ADDR_100 = 0xD348 MONEY_ADDR_10000 = 0xD349 def bcd(num): def bit_count(bits): def read_bit(game, addr, bit) -> bool: def read_uint16(game, start_addr): def position(game): def party(game): def opponent(game): def oak_parcel(game): def pokedex_obtained(game): def pokemon_seen(game): def pokemon_caught(game): def hp(game): def money(game): def badges(game): def events(game): # Path: pokegym/game_map.py MAP_PATH = __file__.rstrip('game_map.py') + 'map_data.json' MAP_DATA = json.load(open(MAP_PATH, 'r'))['regions'] MAP_DATA = {int(e['id']): e for e in MAP_DATA} def local_to_global(r, c, map_n): # Path: pokegym/environment.py from pdb import set_trace as T from gymnasium import Env, spaces from pokegym.pyboy_binding import (ACTIONS, make_env, open_state_file, load_pyboy_state, run_action_on_emulator) from pokegym import ram_map, game_map import numpy as np import os def play(): '''Creates an environment and plays it''' env = Environment(rom_path='pokemon_red.gb', state_path=None, headless=False, disable_input=False, sound=False, sound_emulated=False, verbose=True ) env.reset() env.game.set_emulation_speed(1) # Display available actions print("Available actions:")
for idx, action in enumerate(ACTIONS):
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: AlexandrErohin/home-assistant-flightradar24 # Path: custom_components/flightradar24/models.py class BoundingBox: """Bounding box for retrieving state vectors.""" min_latitude: float max_latitude: float min_longitude: float max_longitude: float def validate(self) -> None: """Validate if the latitude and longitude are correct.""" self._check_latitude(self.min_latitude) self._check_latitude(self.max_latitude) self._check_longitude(self.min_longitude) self._check_longitude(self.max_longitude) def get_string(self) -> str: return "{},{},{},{}".format(self.max_latitude, self.min_latitude, self.min_longitude, self.max_longitude) @staticmethod def _check_latitude(degrees: float) -> None: if degrees < -90 or degrees > 90: msg = f"Invalid latitude {degrees}! Must be in [-90, 90]." raise Exception(msg) @staticmethod def _check_longitude(degrees: float) -> None: if degrees < -180 or degrees > 180: msg = f"Invalid longitude {degrees}! Must be in [-180, 180]." raise Exception(msg) # Path: custom_components/flightradar24/const.py DOMAIN = "flightradar24" # Path: custom_components/flightradar24/const.py URL = 'https://www.flightradar24.com/' # Path: custom_components/flightradar24/const.py DEFAULT_NAME = "FlightRadar24" # Path: custom_components/flightradar24/const.py EVENT_FLIGHTRADAR24_ENTRY = f"{DOMAIN}_entry" # Path: custom_components/flightradar24/const.py EVENT_FLIGHTRADAR24_EXIT = f"{DOMAIN}_exit" # Path: custom_components/flightradar24/coordinator.py from typing import Any from datetime import timedelta from homeassistant.core import HomeAssistant from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from homeassistant.helpers.device_registry import DeviceInfo from .models import BoundingBox from .const import ( DOMAIN, URL, DEFAULT_NAME, EVENT_FLIGHTRADAR24_ENTRY, EVENT_FLIGHTRADAR24_EXIT, ) from logging import Logger from FlightRadar24 import FlightRadar24API import math import pycountry from __future__ import annotations class FlightRadar24Coordinator(DataUpdateCoordinator[int]): def __init__( self, hass: HomeAssistant, bound: BoundingBox, client: FlightRadar24API, update_interval: int, logger: Logger, ) -> None: self._bound = bound self._client = client self._logger = logger self.tracked: dict[int, dict[str, Any]] | None = None self.entered = {} self.exited = {} self.device_info = DeviceInfo( configuration_url=URL,
identifiers={(DOMAIN, DEFAULT_NAME)},
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ej0cl6/TextEE # Path: TextEE/models/QueryAndExtract/metadata.py class Metadata(object): def __init__(self, metadata_path, dataset, type_set): self.pos_set = ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X'] self.pos2id = dict((v, i) for v, i in zip(sorted(self.pos_set), range(len(self.pos_set)))) self.entity_to_ids = {'FAC': 0, 'GPE': 1, 'LOC': 2, 'ORG': 3, 'PER': 4, 'VEH': 5, 'WEA': 6, 'O': 7, '[PAD]': 8} with open(metadata_path, 'r') as j: meta = json.loads(j.read()) self.dataset = dataset self.type_set = type_set self.metadata = DatasetFactContainer(meta[dataset], type_set=type_set) def __str__(self): return '\n'.join(['%s:%s' % item for item in self.__dict__.items()]) # Path: TextEE/models/QueryAndExtract/utils.py def pad_seq(data, pad_value=0, dtype='long'): N = len(data) for i in range(N): data[i] = np.array(data[i]) maxlen = max([len(x) for x in data]) data = pad_sequences(data, maxlen=maxlen, dtype=dtype, truncating="post", padding="post", value=pad_value) return torch.Tensor(data).cuda() # Path: TextEE/models/QueryAndExtract/EAEmodel.py import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import ipdb import ipdb from transformers import BertConfig, RobertaConfig, BertModel, RobertaModel from .metadata import Metadata from .utils import pad_seq from keras_preprocessing.sequence import pad_sequences class QueryAndExtractEAEModel(nn.Module): def __init__(self, config, tokenizer, type_set): super().__init__() self.config = config self.tokenizer = tokenizer self.type_set = type_set self.earl_model = EARLModel(config, tokenizer, type_set) self.ner_model = NERModel(config, tokenizer) def forward(self, batch): ner_loss = self.ner_model(batch) loss, score = self.earl_model(batch) return loss, score, ner_loss class EARLModel(nn.Module): def __init__(self, config, tokenizer, type_set): super().__init__() self.config = config self.tokenizer = tokenizer self.tokenizer_pad_value = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0] self.type_set = type_set
self.metadata = Metadata(config.metadata_path, self.config.dataset, type_set)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: fofr/cog-sdxl-multi-controlnet-lora # Path: controlnet_preprocess.py class ControlNetPreprocessor: ANNOTATOR_CLASSES = { "none": None, "edge_canny": CannyDetector, "depth_leres": LeresDetector, "depth_midas": MidasDetector, "soft_edge_pidi": PidiNetDetector, "soft_edge_hed": HEDdetector, "lineart": LineartDetector, "lineart_anime": LineartAnimeDetector, "openpose": OpenposeDetector, # "straight_edge_mlsd": None, # "face_detector": None, # "content_shuffle": None, # "normal_bae": None, # "segementation_sam": None, } ANNOTATOR_NAMES = list(ANNOTATOR_CLASSES.keys()) def __init__(self, predictor): WeightsDownloader.download_if_not_exists( CONTROLNET_PREPROCESSOR_URL, CONTROLNET_PREPROCESSOR_MODEL_CACHE ) self.annotators = {} self.predictor = predictor torch.device("cuda") @staticmethod def get_annotator_names(): return ControlNetPreprocessor.ANNOTATOR_NAMES def initialize_detector( self, detector_class, model_name="lllyasviel/Annotators", **kwargs ): print(f"Initializing {detector_class.__name__}") if hasattr(detector_class, 'from_pretrained'): return detector_class.from_pretrained( model_name, cache_dir=CONTROLNET_PREPROCESSOR_MODEL_CACHE, **kwargs, ) else: return detector_class(**kwargs) def annotators_list(self): return list(self.annotators.keys()) def process_image(self, image, annotator): print(f"Processing image with {annotator}") if annotator not in self.annotators: self.annotators[annotator] = self.initialize_detector( self.ANNOTATOR_CLASSES[annotator] ) return self.annotators[annotator](image) # Path: weights_downloader.py class WeightsDownloader: @staticmethod def download_if_not_exists(url, dest): if not os.path.exists(dest): WeightsDownloader.download(url, dest) @staticmethod def download(url, dest): start = time.time() print("downloading url: ", url) print("downloading to: ", dest) subprocess.check_call(["pget", "-x", url, dest], close_fds=False) print("downloading took: ", time.time() - start) # Path: controlnet.py import torch from diffusers import ControlNetModel from controlnet_preprocess import ControlNetPreprocessor from weights_downloader import WeightsDownloader CONTROLNET_MODEL_CACHE = "./controlnet-cache" CONTROLNET_URL = "https://weights.replicate.delivery/default/controlnet/sdxl-cn-canny-depth-softe-pose-qr.tar" class ControlNet: CONTROLNET_MODELS = [ "none", "edge_canny", "illusion", "depth_leres", "depth_midas", "soft_edge_pidi", "soft_edge_hed", "lineart", "lineart_anime", "openpose", # Preprocessors without an XL model yet # "straight_edge_mlsd", # "face_detector", # "content_shuffle", # "normal_bae", # "segementation_sam", ] def __init__(self, predictor): WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE) self.predictor = predictor self.controlnet_preprocessor = None self.models = {} def initialize_controlnet(self, model_name): print("Initializing", model_name) return ControlNetModel.from_pretrained( model_name, cache_dir=CONTROLNET_MODEL_CACHE, torch_dtype=torch.float16 ) def get_model(self, controlnet_name): if controlnet_name not in self.models: if controlnet_name.startswith("edge_"): self.models[controlnet_name] = self.initialize_controlnet("diffusers/controlnet-canny-sdxl-1.0") elif controlnet_name.startswith("depth_"): self.models[controlnet_name] = self.initialize_controlnet("diffusers/controlnet-depth-sdxl-1.0-small") elif controlnet_name.startswith("soft_edge") or controlnet_name.startswith("lineart"): self.models[controlnet_name] = self.initialize_controlnet("SargeZT/controlnet-sd-xl-1.0-softedge-dexined") elif controlnet_name == "openpose": self.models[controlnet_name] = self.initialize_controlnet("thibaud/controlnet-openpose-sdxl-1.0") elif controlnet_name == "illusion": self.models[controlnet_name] = self.initialize_controlnet("monster-labs/control_v1p_sdxl_qrcode_monster") return self.models.get(controlnet_name) def get_models(self, controlnet_names): models = [ self.get_model(controlnet_name) for controlnet_name in controlnet_names ] return list(filter(None, models)) def preprocess(self, image, controlnet_name): # Illusion model needs no preprocessing if controlnet_name == "illusion" or controlnet_name == "none": return image if self.controlnet_preprocessor is None:
self.controlnet_preprocessor = ControlNetPreprocessor(self.predictor)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ahayler/s4c # Path: utils/array_operations.py def to(data, device, non_blocking=True): if isinstance(data, dict): return {k: to(data[k], device, non_blocking=non_blocking) for k in data.keys()} elif isinstance(data, list): return [to(v, device, non_blocking=non_blocking) for v in data] else: return data.to(device, non_blocking=non_blocking) # Path: utils/metrics.py class MeanMetric(Metric): def __init__(self, output_transform=lambda x: x["output"], device="cpu"): self._sum = None self._num_examples = None self.required_output_keys = () super(MeanMetric, self).__init__(output_transform=output_transform, device=device) @reinit__is_reduced def reset(self): self._sum = torch.tensor(0, device=self._device, dtype=float) self._num_examples = 0 super(MeanMetric, self).reset() @reinit__is_reduced def update(self, value): if torch.any(torch.isnan(torch.tensor(value))): return self._sum += value self._num_examples += 1 @sync_all_reduce("_num_examples:SUM", "_sum:SUM") def compute(self): if self._num_examples == 0: raise NotComputableError('CustomAccuracy must have at least one example before it can be computed.') return self._sum.item() / self._num_examples @torch.no_grad() def iteration_completed(self, engine: Engine) -> None: output = self._output_transform(engine.state.output) self.update(output) # Path: utils/base_trainer.py import json import time import ignite import ignite.distributed as idist import torch from datetime import datetime from pathlib import Path from typing import Union from omegaconf import OmegaConf from ignite.contrib.engines import common from ignite.contrib.handlers import TensorboardLogger from ignite.contrib.handlers.base_logger import BaseHandler from ignite.engine import Engine, Events, EventEnum from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine from ignite.utils import manual_seed, setup_logger from torch.cuda.amp import autocast, GradScaler from utils.array_operations import to from utils.metrics import MeanMetric from torch.backends import cudnn # used for debugging torch.autograd.set_detect_anomaly(True) def base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize): # copy the segmentation mode to the data and model_conf part of the config config['data']['segmentation_mode'] = config.get("segmentation_mode", None) config['model_conf']['segmentation_mode'] = config.get("segmentation_mode", None) rank = idist.get_rank() manual_seed(config["seed"] + rank) device = idist.device() logger = setup_logger(name=config["name"]) log_basic_info(logger, config) output_path = config["output_path"] if rank == 0: if config["stop_iteration"] is None: now = datetime.now().strftime("%Y%m%d-%H%M%S") else: now = f"stop-on-{config['stop_iteration']}" folder_name = f"{config['name']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}" output_path = Path(output_path) / folder_name if not output_path.exists(): output_path.mkdir(parents=True) config["output_path"] = output_path.as_posix() logger.info(f"Output path: {config['output_path']}") if "cuda" in device.type: config["cuda device name"] = torch.cuda.get_device_name(local_rank) # Setup dataflow, model, optimizer, criterion loaders = get_dataflow(config, logger) if len(loaders) == 2: train_loader, test_loader = loaders vis_loader = None else: train_loader, test_loader, vis_loader = loaders if hasattr(train_loader, "dataset"): logger.info(f"Dataset length: Train: {len(train_loader.dataset)}, Test: {len(test_loader.dataset)}") config["num_iters_per_epoch"] = len(train_loader) model, optimizer, criterion, lr_scheduler = initialize(config, logger) logger.info(f"Model parameters: {sum(p.numel() for p in model.parameters())}") # Let's now setup evaluator engine to perform model's validation and compute metrics metrics = get_metrics(config, device)
metrics_loss = {k: MeanMetric((lambda y: lambda x: x["loss_dict"][y])(k)) for k in criterion.get_loss_metric_names()}
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Emmo00/alxcheck # Path: alxcheck/utils/error_logging.py def print_no_module_docstring(file_path): print(Fore.RED + f"{file_path} does not have Module DocString" + Fore.RESET) # Path: alxcheck/utils/error_logging.py def print_no_function_docstring(file_path, function_name): print( Fore.RED + f"In {file_path}, the {function_name} function has no Function DocString" + Fore.RESET ) # Path: alxcheck/utils/error_logging.py def print_no_class_docstring(file_path, class_name): print( Fore.RED + f"In {file_path}, the {class_name} class has no Class DocString" + Fore.RESET ) # Path: alxcheck/utils/error_logging.py def print_check_docstrings(file_path): print(Fore.RED + f"Error: Check docstrings in {file_path}" + Fore.RESET) # Path: alxcheck/utils/error_logging.py def print_error_parsing_file(file_path): import ast try: with open(file_path, "r") as f: ast.parse(f.read()) except SyntaxError as syntax_error: print( Fore.RED + f"SyntaxError\n\tFile: {file_path}\n\tLine: {syntax_error.lineno}\tMessage: {syntax_error.msg}" + Fore.RESET ) except Exception as e: print(Fore.RED + f"Error Parsing File:\n\t{type(e)}" + Fore.RESET) # Path: alxcheck/checks/python.py import os import ast import subprocess from ..utils.error_logging import ( print_no_module_docstring, print_no_function_docstring, print_no_class_docstring, print_check_docstrings, print_error_parsing_file, ) def check_file_is_executable(file_path): flag = True if not os.access(file_path, os.X_OK): flag = False return flag def check_python_shebang(file_path): flag = True with open(file_path, "rb") as f: first_line = f.readline().strip() if first_line not in (b"#!/usr/bin/python3", b"#!/usr/bin/env python3"): flag = False return flag def check_module_function_class_documentation(file_path): flag = True with open(file_path, "rb") as f: content = f.read() # remove shebang if content.startswith(b"#!"): if len(content.split(b"\n")) < 2: content = "" else: content = content.split(b"\n", 1)[1] tree = None try: tree = ast.parse(content) except Exception: print_error_parsing_file(file_path) try: if tree is None: return for node in ast.walk(tree): # check module docstring if isinstance(node, ast.Module): if not isinstance(node.body[0].value, ast.Str): flag = False print_no_module_docstring(file_path) return # check function docstring if isinstance(node, ast.FunctionDef) and not isinstance( node.body[0].value, ast.Str ): flag = False print_no_function_docstring(file_path, node.name) # check class docstring if isinstance(node, ast.ClassDef) and not isinstance( node.body[0].value, ast.Str ): flag = False print_no_class_docstring(file_path, node.name) except Exception:
print_check_docstrings(file_path)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: TimbreWatermarking/TimbreWatermarking # Path: voice.clone/Fastspeech2/TTS/tts/utils/text/english/abbreviations.py # Path: voice.clone/Fastspeech2/TTS/tts/utils/text/english/number_norm.py def normalize_numbers(text): text = re.sub(_comma_number_re, _remove_commas, text) text = re.sub(_currency_re, _expand_currency, text) text = re.sub(_decimal_number_re, _expand_decimal_point, text) text = re.sub(_ordinal_re, _expand_ordinal, text) text = re.sub(_number_re, _expand_number, text) return text # Path: voice.clone/Fastspeech2/TTS/tts/utils/text/english/time_norm.py def expand_time_english(text: str) -> str: return re.sub(_time_re, _expand_time_english, text) # Path: voice.clone/Fastspeech2/TTS/tts/utils/text/french/abbreviations.py # Path: voice.clone/Fastspeech2/TTS/tts/utils/text/cleaners.py import re from anyascii import anyascii from TTS.tts.utils.text.chinese_mandarin.numbers import replace_numbers_to_characters_in_text from .english.abbreviations import abbreviations_en from .english.number_norm import normalize_numbers as en_normalize_numbers from .english.time_norm import expand_time_english from .french.abbreviations import abbreviations_fr """Set of default text cleaners""" # TODO: pick the cleaner for languages dynamically # Regular expression matching whitespace: _whitespace_re = re.compile(r"\s+") def expand_abbreviations(text, lang="en"): if lang == "en": _abbreviations = abbreviations_en elif lang == "fr": _abbreviations = abbreviations_fr for regex, replacement in _abbreviations: text = re.sub(regex, replacement, text) return text def lowercase(text): return text.lower() def collapse_whitespace(text): return re.sub(_whitespace_re, " ", text).strip() def convert_to_ascii(text): return anyascii(text) def remove_aux_symbols(text): text = re.sub(r"[\<\>\(\)\[\]\"]+", "", text) return text def replace_symbols(text, lang="en"): text = text.replace(";", ",") text = text.replace("-", " ") text = text.replace(":", ",") if lang == "en": text = text.replace("&", " and ") elif lang == "fr": text = text.replace("&", " et ") elif lang == "pt": text = text.replace("&", " e ") return text def basic_cleaners(text): """Basic pipeline that lowercases and collapses whitespace without transliteration.""" text = lowercase(text) text = collapse_whitespace(text) return text def transliteration_cleaners(text): """Pipeline for non-English text that transliterates to ASCII.""" # text = convert_to_ascii(text) text = lowercase(text) text = collapse_whitespace(text) return text def basic_german_cleaners(text): """Pipeline for German text""" text = lowercase(text) text = collapse_whitespace(text) return text # TODO: elaborate it def basic_turkish_cleaners(text): """Pipeline for Turkish text""" text = text.replace("I", "ı") text = lowercase(text) text = collapse_whitespace(text) return text def english_cleaners(text): """Pipeline for English text, including number and abbreviation expansion.""" # text = convert_to_ascii(text) text = lowercase(text)
text = expand_time_english(text)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: nillion-oss/tinysig # Path: src/tinysig/utils.py def add(values: list[int], size: int) -> int: """ Calculate the sum of a list of integers modulo 'size'. Args: values (list[int]): A list of integers to be summed. size (int): The modulo value. Returns: int: The sum of the integers in 'values' modulo 'size'. Examples: >>> add([2, 4, 6], 5) 2 >>> add([3, 7, 10], 4) 0 """ result = 0 for v in values: result = (result + v) % size return result # Path: src/tinysig/utils.py def generate_additive_shares(secret: int, n: int, size: int) -> list[int]: """ Generates additive secret shares for a given secret value, using modular arithmetic. Args: secret (int): The secret value to be shared. n (int): The number of shares to generate. size (int): The modulus value for modular arithmetic. Returns: List[int]: A list of additive secret shares. Example: >>> random.seed(0) >>> generate_additive_shares(26, 3, 2**5) [8, 24, 26] """ shares = [rand(size) for _ in range(n-1)] last_sh = (secret - add(shares, size)) % size shares = [last_sh] + shares return shares # Path: src/tinysig/network.py from dataclasses import dataclass, field from typing import Dict, List, Union from .utils import add, generate_additive_shares @dataclass class Node: """ Represents a node in the network.""" id: int """Identifier for the node.""" shares_db: Dict[str, int] = field(default_factory=dict) """Database for holding shares.""" open_db: Dict[str, int] = field(default_factory=dict) """Database for holding open values.""" he_public_keys: Dict[int, int] = field(default_factory=dict) """Dictionary for holding homomorphic encryption public keys.""" def get_share(self, label: str) -> None: """Retrieve a share from the 'shares_db'.""" return self.shares_db[label] def get_open(self, label: str) -> None: """Retrieve an open value from the 'open_db'.""" return self.open_db[label] def set_share(self, value, label: str) -> None: """Set a share in the 'shares_db'.""" self.shares_db[label] = value def set_open(self, value, label: str) -> None: """Set an open value in the 'open_db'.""" self.open_db[label] = value def delete_share(self, label: str) -> None: """Delete a share from the 'shares_db'.""" self.shares_db.pop(label) def delete_open(self, label: str) -> None: """Delete an open value from the 'open_db'.""" self.open_db.pop(label) @dataclass class Client(Node): """Represents a client node in the network, inheriting from the 'Node' class.""" he_private_key: int = field(default=0) class Network: """Represents a network of nodes and clients. Manages the interactions and cryptographic operations within the network, including sharing secrets, broadcasting values, and reconstructing shared values. """ nodes: List[Node] """List of nodes in the network.""" clients: List[Client] """List of clients in the network.""" q: int """Prime field.""" h: int """Multiplicative field generator.""" def __init__(self, N, q, h=2, C=1): """ Initialize the network with 'N' nodes, prime field 'q', field generator 'h', and 'C' clients. Parameters: N (int): Number of nodes in the network. q (int): Prime field. h (int): Multiplicative field generator (default is 2). C (int): Number of clients in the network (default is 1). """ self.nodes = [Node(i+1) for i in range(N)] self.clients = [Client(i+1) for i in range(C)] self.N = N self.q = q self.h = h def print(self): """Print a readable representation of the network, including nodes and clients with their databases.""" print(f"Network(N={len(self.nodes)}, q={self.q},") print(" nodes=[") for node in self.nodes: print(f" Node(id={node.id},") print(" shares_db={") for key, value in node.shares_db.items(): print(f" {key}: {value},") print(" },") print(" public_keys={") for key, value in node.he_public_keys.items(): print(f" {key}: {value},") print(" },") print(" open_db={") for key, value in node.open_db.items(): print(f" {key}: {value},") print(" }") print(" )") print(" ]\n)") print(" clients=[") for client in self.clients: print(f" Client(id={client.id},") print(" shares_db={") for key, value in client.shares_db.items(): print(f" {key}: {value},") print(" },") print(" public_keys={") for key, value in client.he_public_keys.items(): print(f" {key}: {value},") print(" },") print(f" private_keys={client.he_private_key},") print(" open_db={") for key, value in client.open_db.items(): print(f" {key}: {value},") print(" }") print(" )") print(" ]\n)") def reconstruct_local(self, type_share: str, get_label: str, save_label: str, party: Union[Client, Node]) -> None: """Locally reconstruct exponent share ('exp') or base ('base') shared value.""" type_label = "_sh_exp" if type_share == "exp" else "_sh_base" p = (self.q - 1) if type_share == "exp" else self.q shares = [party.get_share(get_label+type_label+"_node_"+str(node.id)) for node in self.nodes]
reconstructed = add(shares, p)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: naver-ai/scob # Path: lightning_modules/data_modules/transforms/common.py TRANSFORM_NAME_TO_CLASS = { "RandomRotate": RandomRotate, "CraftRandomCrop": CraftRandomCrop, "Resize": Resize, "ResizeOD": ResizeOD, "PhotometricDistort": PhotometricDistort, "MoCo_PhotometricDistort": MoCo_PhotometricDistort, "ResizeTwoPic": ResizeTwoPic, "ResizeMultiview": ResizeMultiview, "KeepAspectRatioBilinearResize": KeepAspectRatioBilinearResize, "RandomCrop": RandomCrop, "MultiScaleResize": MultiScaleResize, "KeepAspectRatioBilinearResizeOD": KeepAspectRatioBilinearResizeOD, "Otor_OriginDistort": Otor_OriginDistort, } # Path: lightning_modules/data_modules/transforms/common.py class W_Compose: """ Modified pytorch compose pytorch.org/vision/0.10/transforms.html#torchvision.transforms.Compose """ def __init__(self, transforms): self.transforms = transforms def __call__(self, img, quads=None): second_img = None multiview_img = None for transform in self.transforms: if ( isinstance(transform, ResizeTwoPic) and transform.second_size is not None ): img, second_img, quads = transform(img, quads) elif isinstance(transform, ResizeMultiview): img, multiview_img, quads = transform(img, quads) elif isinstance(transform, MoCo_PhotometricDistort): img, _ = transform(img, quads) multiview_img, quads = transform(multiview_img, quads) else: img, quads = transform(img, quads) if second_img is None and multiview_img is None: return img, quads elif multiview_img is not None: return img, multiview_img, quads else: return img, second_img, quads def __repr__(self): format_string = self.__class__.__name__ + "(" for transform in self.transforms: format_string += f"\n {transform}" format_string += "\n)" return format_string # Path: utils/dataset_utils.py def get_image_normalize_mean_and_std(image_normalize): if image_normalize is None: mean_and_std = None elif image_normalize == "imagenet_default": mean_and_std = (IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD) elif image_normalize == "imagenet_inception": # In BEiT, "--imagenet_default_mean_and_std: enable this for ImageNet-1k pre-training, # i.e., (0.485, 0.456, 0.406) for mean and (0.229, 0.224, 0.225) for std. # We use (0.5, 0.5, 0.5) for mean and (0.5, 0.5, 0.5) for std by default # on other pre-training data." mean_and_std = (IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD) else: raise ValueError(f"Unknown image_normalize={image_normalize}") return mean_and_std # Path: lightning_modules/data_modules/transforms/transformer_decoder.py from typing import List, Tuple, Union from lightning_modules.data_modules.transforms.common import ( TRANSFORM_NAME_TO_CLASS, W_Compose, ) from utils.dataset_utils import get_image_normalize_mean_and_std import torch import torchvision.transforms as transforms class TransformerDecoderTransformForFineTuning: """ - BEiT: https://github.com/microsoft/unilm/blob/master/beit/datasets.py#L27 - TrOCR: https://github.com/microsoft/unilm/blob/53995b4876464146365693396aaaa09e88a4494e/trocr/data_aug.py#L120 """ def __init__( self, size: Union[Tuple, List], transforms_list=None, image_normalize="imagenet_default", ): self.common_transform = self.__get_common_transform(size, transforms_list) self.patch_transform = self.__get_patch_transform(image_normalize) def __call__(self, img, quads): for_patches, quads = self.common_transform(img, quads) for_patches = self.patch_transform(for_patches) return for_patches, quads @staticmethod def __get_common_transform(size, transforms_list): tranforms = [] for transform_obj in transforms_list: transform_class = TRANSFORM_NAME_TO_CLASS[transform_obj.name] if transform_obj.params is not None: params = dict(transform_obj.params) else: params = {} if transform_obj.name in [ "Resize", "ResizeOD", "KeepAspectRatioBilinearResize", "ResizeMultiview", "MultiScaleResize", "KeepAspectRatioBilinearResizeOD", ]: params["size"] = size elif transform_obj.name == "ResizeTwoPic": params["size"] = size tranforms.append(transform_class(**params)) return W_Compose(tranforms) @staticmethod def __get_patch_transform(image_normalize): patch_trans = [transforms.ToTensor()]
mean_and_std = get_image_normalize_mean_and_std(image_normalize)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: speckai/speck # Path: src/python/speck/chat/entities.py NOT_GIVEN = None class Message(BaseModel): class SafeDict(dict): class Prompt(str): class Response(BaseModel): class MessageChunk(BaseModel): class Stream: class LogConfig(BaseModel): class Config: class ChatConfig: class OpenAIChatConfig(ChatConfig): class IChatClient(ABC): def __missing__(self, key): def to_dict(self): def __init__( self, messages: PromptTypes, variables: Union[dict[str, str], None] = None, **kwargs, ): def create( cls, messages: PromptTypes, variables: dict[str, str] = None ) -> "Prompt": def _read(cls, lines: str) -> "Prompt": def add_message(): def read(cls, path: str, name: Union[str, None] = None) -> "Prompt": def read_all(cls, path: str) -> dict[str, "Prompt"]: def _file(self): def write(cls, prompt: Union["Prompt", dict[str, "Prompt"]], path: str): def __new__( cls, messages: PromptTypes, **kwargs, ): def from_openai(cls, messages: list[dict[str, str]]): def to_list(self): def to_dict(self): def _apply_variables( messages: list[Message], variables: dict[str, str] ) -> list[Message]: def _check_duplicate_keys(self, other_variables: dict[str, str]) -> dict[str, str]: def _remove_duplicate_keys_from_messages( self, other_variables: dict[str, str] ) -> list[Message]: def format(self, *args, **kwargs): def __add__(self, other): def __str__(self): def __init__( self, content: str, closed: bool = False, prompt_tokens: Union[int, None] = None, completion_tokens: Union[int, None] = None, raw: Union[dict, None] = None, **kwargs, ): def create(cls, response: ResponseTypes) -> "Response": def __str__(self): def encode(self, encoding: str = "utf-8"): def __init__( self, client: "Speck", iterator: Iterator[Any], kwargs: dict, log_config: "LogConfig", processor: Callable[[Any], MessageChunk], ): def _log(self): def _process(self, item) -> MessageChunk: def __next__(self) -> MessageChunk: def __iter__(self) -> Iterator[MessageChunk]: def close(self): def __init__( self, *, provider: str = None, model: OpenAIModel, stream: bool = False, _log: bool = True, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, **config_kwargs, ): def to_dict(self): def _convert_optional(self, value): def create(cls, config: ChatConfigTypes, kwargs: dict = None) -> "ChatConfig": def get(self, key: str, default: Any = None) -> Any: def convert(self, provider: str = "speck") -> "ChatConfig": def log_chat( self, *, log_config: LogConfig, prompt: Prompt, response: Response, provider: str = "speck", ): def encode(self, encoding: str = "utf-8"): def __str__(self): def __init__( self, model: OpenAIModel, stream: bool = False, _log: bool = True, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, **config_kwargs, ): def convert(self, provider: str = "speck") -> ChatConfig: def debug_chat( self, prompt: "Prompt", config: "ChatConfig" ) -> ("Prompt", "ChatConfig"): def chat( self, prompt: PromptTypes, config: Union[ChatConfig, NotGiven] = NOT_GIVEN, **config_kwargs, ) -> Union[Response, Stream]: async def achat( self, prompt: PromptTypes, config: Union[ChatConfig, NotGiven] = NOT_GIVEN, **config_kwargs, ) -> Union[Response, Stream]: # Path: src/python/speck/connections/providers.py class Providers(Enum): Anthropic = "Anthropic" AzureOpenAI = "AzureOpenAI" OpenAI = "OpenAI" CustomProvider = "CustomProvider" Replicate = "Replicate" # Path: src/python/speck/connections/connector.py from abc import ABC from ..chat.entities import ChatLogger, LogConfig, Prompt, Response from .providers import Providers class IConnector(ABC): _client: "Speck" def __init__(self, client: "Speck", provider: Providers): self._client = client self.provider = provider # @abstractmethod # def process_message(self, messages: Messages, model: str) -> str: # pass def _get_log_kwargs(self, prompt: Prompt, response: Response, **kwargs): return { "provider": self.provider, "model": kwargs.get("model"), "temperature": kwargs.get("temperature"), "stream": kwargs.get("stream", False), "prompt": prompt, "config": kwargs, "response": response, } def log( self, *, log_config: LogConfig, prompt: Prompt, response: Response, **kwargs ): # Todo: refactor to use config.log_chat !!!
ChatLogger.log(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: chaiNNer-org/spandrel # Path: src/spandrel/architectures/KBNet/arch/kb_utils.py class KBAFunction(torch.autograd.Function): @staticmethod def forward(ctx, x, att, selfk, selfg, selfb, selfw): B, nset, H, W = att.shape KK = selfk**2 selfc = x.shape[1] att = att.reshape(B, nset, H * W).transpose(-2, -1) ctx.selfk, ctx.selfg, ctx.selfc, ctx.KK, ctx.nset = ( selfk, selfg, selfc, KK, nset, ) ctx.x, ctx.att, ctx.selfb, ctx.selfw = x, att, selfb, selfw bias = att @ selfb attk = att @ selfw uf = torch.nn.functional.unfold(x, kernel_size=selfk, padding=selfk // 2) # for unfold att / less memory cost uf = uf.reshape(B, selfg, selfc // selfg * KK, H * W).permute(0, 3, 1, 2) attk = attk.reshape(B, H * W, selfg, selfc // selfg, selfc // selfg * KK) x = attk @ uf.unsqueeze(-1) # del attk, uf x = x.squeeze(-1).reshape(B, H * W, selfc) + bias x = x.transpose(-1, -2).reshape(B, selfc, H, W) return x @staticmethod def backward(ctx, grad_output): x, att, selfb, selfw = ctx.x, ctx.att, ctx.selfb, ctx.selfw selfk, selfg, selfc, KK, nset = ( ctx.selfk, ctx.selfg, ctx.selfc, ctx.KK, ctx.nset, ) B, selfc, H, W = grad_output.size() dbias = grad_output.reshape(B, selfc, H * W).transpose(-1, -2) dselfb = att.transpose(-2, -1) @ dbias datt = dbias @ selfb.transpose(-2, -1) attk = att @ selfw uf = F.unfold(x, kernel_size=selfk, padding=selfk // 2) # for unfold att / less memory cost uf = uf.reshape(B, selfg, selfc // selfg * KK, H * W).permute(0, 3, 1, 2) attk = attk.reshape(B, H * W, selfg, selfc // selfg, selfc // selfg * KK) dx = dbias.view(B, H * W, selfg, selfc // selfg, 1) dattk = dx @ uf.view(B, H * W, selfg, 1, selfc // selfg * KK) duf = attk.transpose(-2, -1) @ dx del attk, uf dattk = dattk.view(B, H * W, -1) datt += dattk @ selfw.transpose(-2, -1) dselfw = att.transpose(-2, -1) @ dattk duf = duf.permute(0, 2, 3, 4, 1).view(B, -1, H * W) dx = F.fold(duf, output_size=(H, W), kernel_size=selfk, padding=selfk // 2) datt = datt.transpose(-1, -2).view(B, nset, H, W) return dx, datt, None, None, dselfb, dselfw # Path: src/spandrel/architectures/KBNet/arch/kb_utils.py class LayerNorm2d(nn.Module): def __init__(self, channels, eps=1e-6, requires_grad=True): super().__init__() self.register_parameter( "weight", nn.Parameter(torch.ones(channels), requires_grad=requires_grad) ) self.register_parameter( "bias", nn.Parameter(torch.zeros(channels), requires_grad=requires_grad) ) self.eps = eps def forward(self, x): return LayerNormFunction.apply(x, self.weight, self.bias, self.eps) # Path: src/spandrel/architectures/KBNet/arch/kb_utils.py class SimpleGate(nn.Module): def forward(self, x): x1, x2 = x.chunk(2, dim=1) return x1 * x2 # Path: src/spandrel/architectures/KBNet/arch/kbnet_s.py import math import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .kb_utils import KBAFunction, LayerNorm2d, SimpleGate # type: ignore class KBBlock_s(nn.Module): def __init__( self, c, DW_Expand=2, FFN_Expand=2, nset=32, k=3, gc=4, lightweight=False ): super().__init__() self.k, self.c = k, c self.nset = nset dw_ch = int(c * DW_Expand) ffn_ch = int(FFN_Expand * c) self.g = c // gc self.w = nn.Parameter(torch.zeros(1, nset, c * c // self.g * self.k**2)) self.b = nn.Parameter(torch.zeros(1, nset, c)) self.init_p(self.w, self.b)
self.norm1 = LayerNorm2d(c)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: robocorp/llmstatemachine # Path: src/llmstatemachine/function.py def create_definition(func: Callable, goal: str) -> FunctionDefinition: source = inspect.getsource(func) client = OpenAI() response = client.chat.completions.create( model="gpt-4-1106-preview", messages=[ { "role": "system", "content": f"""Extract function metadata from the following function definition: ``` {source} ``` Focus on details that are meaningful for the following assignment: ``` {goal} ``` Extract the function metadata. """, } ], functions=[ { "description": "FunctionDefinition is a tool for metadata extraction", "name": "FunctionDefinition", "parameters": { "type": "object", "properties": { "thinking": { "type": "string", "description": ( "Logical thinking about function metadata extraction and draft of the answer." ), }, "function_name": { "type": "string", "description": "Name of the function.", }, "function_description": { "type": "string", "description": "Short well thought description of what the function is used for.", }, "argument_description": { "type": "string", "description": "Short well thought description of what the function argument is used for.", }, }, "required": [ "thinking", "function_name", "function_description", "argument_description", ], }, } ], function_call={"name": "FunctionDefinition"}, ) msg = response.choices[0].message assert msg.function_call print(msg.function_call) args: FunctionDefinition = json.loads(msg.function_call.arguments) if not is_valid_function_definition(args): raise ValueError("Invalid data format for FunctionDefinition") return args # Path: src/llmstatemachine/function.py class FunctionDefinition(TypedDict): function_name: str function_description: str argument_description: str # Path: src/llmstatemachine/workflow_agent.py import json from typing import Dict, Callable, Any, Tuple, List from openai.types.chat.chat_completion_message import FunctionCall from .function import create_definition, FunctionDefinition from openai import OpenAI from openai.types.chat import ( ChatCompletionMessageParam, ChatCompletionMessage, completion_create_params, ) TransitionFunction = Callable[[...], str] FUNCTION_NAME = "ActionSelector" MODEL = "gpt-4-1106-preview" # "gpt-4" _CURRENT_STEPPING_AGENT = None class WorkflowAgent: def __init__( self, goal: str, transitions: Dict[str, Dict[str, TransitionFunction]] ): if "INIT" not in transitions: raise Exception("Must define INIT state") self._transitions: Dict[str, Dict[str, TransitionFunction]] = transitions self._current_state = "INIT" self.next_state = None self._messages: List[ChatCompletionMessageParam] = [] self._messages.append({"role": "system", "content": goal}) self._client = OpenAI()
self._func_defs: Dict[TransitionFunction, FunctionDefinition] = dict()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: GoldenThrust/Virtual-Bank # Path: api/debit_cards/models.py class DebitCard(models.Model): account = models.ForeignKey(Account, on_delete=models.CASCADE) card_number = models.BigIntegerField() cvv = models.CharField(max_length=4) expiration_date = models.DateTimeField() created_date = models.DateTimeField(auto_now_add=True) def __str__(self): return f"{self.card_number} - User: {self.account.user.first_name} {self.account.user.last_name}" # Path: api/debit_cards/models.py class DebitCardTransaction(models.Model): transaction = models.OneToOneField(Transaction, on_delete=models.CASCADE, related_name='debit_card') transaction_partner_account = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='transaction_partner_debit_card') def __str__(self): return f"Transfer ID: {self.pk} - Receiver: {self.transaction.account.user.first_name} {self.transaction.account.user.last_name} - Transaction_partner: {self.transaction_partner_account.user.first_name} {self.transaction_partner_account.user.last_name} - Amount: {self.transaction.amount}" # Path: api/debit_cards/utils.py def generate_valid_credit_card_number(): ''' Generate a random credit card number using the Luhn algorithm. Returns: - str: A valid 16-digit credit card number with a starting digit '5'. ''' card_number = '5' + ''.join(str(random.randint(0, 9)) for _ in range(13)) checksum = luhn_checksum(card_number) while checksum != 0: card_number = '5' + ''.join(str(random.randint(0, 9)) for _ in range(13)) checksum = luhn_checksum(card_number) return card_number # Path: api/debit_cards/utils.py def generate_cvv(card_number, expiration_date): ''' Generate a simulated CVV based on the provided card number and expiration date. Args: - card_number (str): The card number to generate CVV from. - expiration_date (datetime.datetime): The expiration date of the card. Returns: - str: The simulated CVV code. ''' formatted_date = expiration_date.strftime('%d%m') card_number_int = int(card_number) masked_card_number = card_number_int >> 5 combined_data = int(f'{formatted_date}{masked_card_number}') masked_combined_data = combined_data & card_number_int hashed = hashlib.sha256(str(masked_combined_data).encode()).hexdigest() cvv = [] index = 0 for char in hashed[::-5]: index += 1 try: int_value = int(char) if len(cvv) < 3: cvv.append(char) except ValueError: pass return ''.join(cvv) # Path: api/debit_cards/serializers.py from rest_framework import serializers from .models import DebitCard, DebitCardTransaction from .utils import generate_valid_credit_card_number, generate_cvv from accounts.serializers import AccountSerializer from transactions.serializers import TransactionSerializer class DebitCardSerializer(serializers.ModelSerializer): card_number = serializers.CharField(read_only=True) cvv = serializers.CharField(read_only=True) created_date = serializers.DateTimeField(read_only=True) account = AccountSerializer() expiration_date = serializers.SerializerMethodField() class Meta:
model = DebitCard
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Mj23978/OpenServer # Path: openserver/core/config/config.py def get_config(self, key: str, default: Optional[str] = None) -> str | None: return self.model_dump().get(key, default) # Path: openserver/core/vector_store/base.py class VectorStore(ABC): client: Vector def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Run more documents through the embeddings and add to the vectorstore. """ texts: list[str] = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return self.add_texts(texts=texts, metadatas=metadatas, **kwargs) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: return self.client.add_texts(texts=texts, metadatas=metadatas, kwargs=kwargs) def similarity_search(self, query: str, top_k: int, metadata: Optional[dict] = None, **kwargs: Any) -> List[Document]: return self.client.similarity_search(query=query, top_k=top_k, metadata=metadata, kwargs=kwargs) def similarity_search_with_relevance_scores(self, query: str, top_k: int, score_threshold: float, **kwargs: Any) -> List[Tuple[Document, float]]: return self.client.similarity_search_with_relevance_scores(query=query, top_k=top_k, score_threshold=score_threshold, kwargs=kwargs) def delete_embeddings_from_vector_db(self, ids: List[str]) -> bool | None: return self.client.delete(ids=ids) # Path: openserver/core/vector_store/embedding/base.py class BaseEmbedding(ABC): client: Embeddings @abstractmethod def get_embeddings(self, text: List[str]) -> List[List[float]]: pass @abstractmethod def get_embedding(self, text: str) -> List[float]: pass # Path: openserver/core/vector_store/qdrant.py from mimetypes import common_types from typing import Dict, Optional, Union from qdrant_client import QdrantClient from qdrant_client.conversions import common_types from langchain.vectorstores.qdrant import Qdrant from ..config.config import get_config from .base import VectorStore from .embedding.base import BaseEmbedding from __future__ import annotations DictFilter = Dict[str, Union[str, int, bool, dict, list]] MetadataFilter = Union[DictFilter, common_types.Filter] def create_qdrant_client(api_key: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = None ) -> QdrantClient: if api_key is None: qdrant_host_name = get_config("QDRANT_HOST_NAME") or "localhost" qdrant_port = int(get_config("QDRANT_PORT", default="6333")) qdrant_client = QdrantClient(host=qdrant_host_name, port=qdrant_port) else: qdrant_client = QdrantClient(api_key=api_key, url=url, port=port) return qdrant_client class QdrantVectorStore(VectorStore): def __init__( self, client: QdrantClient, collection_name: str,
embedding_model: BaseEmbedding,
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: TCLResearchEurope/torch-dag # Path: torch_dag_algorithms/pruning/orbits_search_stage.py class OrbitsDiscoveryStage(enum.Enum): EXTENDED_ORBIT_DISCOVERY = 'extended_orbits_discovery' FINAL_ORBIT_DISCOVERY = 'final_orbits_discovery' CLASSIC_ATTENTION_DISCOVERY = 'classic_attention_discovery' # Path: torch_dag/core/dag_module.py class InnerVertex(Vertex): def __init__( self, name: str, module: torch.nn.Module, predecessors: List[Vertex], ): super().__init__(name=name) self._module = module self._predecessors = list(predecessors) self.dag_module: "DagModule" = None self.orbit = None @property def successors(self) -> List['InnerVertex']: if self.dag_module is None: logger.error(f'Trying to get successors of an InnerVertex that has not been assigned to any DagModule.') return [vertex for vertex in self.dag_module.inner_vertices if self in vertex.predecessors] @property def predecessors(self) -> List[Vertex]: return self._predecessors @property def predecessor_indices(self) -> List[Vertex]: return [self.dag_module.vertices.index(pd) for pd in self.predecessors] @predecessors.setter def predecessors(self, new_predecessors: List[Vertex]): if not isinstance(new_predecessors, list): logger.error(f'Predecessors is expected to be a list. Got {type(new_predecessors)} except.') self._predecessors = new_predecessors @property def module(self) -> torch.nn.Module: return self._module @module.setter def module(self, module: torch.nn.Module): self._module = module # TODO: Remove after validation self.dag_module.update_inner_modules() def config_dict(self, atomic_modules: List[torch.nn.Module]): is_atomic = not isinstance(self.module, DagModule) result = { 'name': self.name, 'predecessor_indices': self.predecessor_indices, 'is_atomic': is_atomic, 'type': 'inner', 'orbit': self.orbit, } if not is_atomic: result['module_dict'] = self.module.config_dict(atomic_modules) else: result['module_index'] = atomic_modules.index(self.module) return result # Path: torch_dag_algorithms/pruning/orbit.py from typing import List from typing import Set from typing import Tuple from torch_dag_algorithms.pruning.orbits_search_stage import OrbitsDiscoveryStage from torch_dag.core.dag_module import InnerVertex class Orbit: def __init__(self, color: int): """Basic orbit object that can represent either extended or final orbit. If orbit has `allow_for_further_processing` set to True then it can be processed by Orbitalizer by it's general mechanism. If set to False orbit won't be processed in any way and will be passed to orbitalization algorithm in unchanged state. `_found_by` - indicates what stage lead to orbit being found. It's used in testing handling custom known patterns that are handled by hand. It also holds information that can be usefull durning debugging. Args: color (int): orbit color. has to be unique allow_for_further_processing (bool, optional): If False orbit won't be process in any way. Defaults to True. """ self.color = color self.vertices_in_scope: Set[InnerVertex] = set() self.sources: List[InnerVertex] = [] self.sinks: List[InnerVertex] = [] self.end_path: List[Tuple[InnerVertex, InnerVertex]] = [] self.kmapps = None self._discovery_stage = None @property
def discovery_stage(self) -> OrbitsDiscoveryStage:
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: repeating/Binance-P2P-alerts-Telegram-bot # Path: bot/binance_api.py async def get_offers(asset: str, fiat: str, trade_type: str, payment_method: str, rows: int = 5, page: int = 1, trans_amount: str = None) -> List[dict]: """ Fetch the best offers from Binance P2P. :param asset: Cryptocurrency asset, e.g., 'USDT', 'BTC'. :param fiat: Fiat currency, e.g., 'USD', 'EUR'. :param trade_type: Trade type, either 'Buy' or 'Sell'. :param rows: Number of offers to retrieve, default is 5. :param page: Page number for pagination, default is 1. :param trans_amount: Transaction amount for filtering offers. :param payment_method: payment type, default is "Wise". :return: List of offers from Binance P2P. """ data = { "asset": asset, "fiat": fiat, "merchantCheck": 'true', # Assuming this should always be true for more reliable offers. "page": page, "payTypes": [payment_method], "publisherType": None, # Assuming we don't filter by publisher type. "rows": rows, "tradeType": trade_type, "transAmount": trans_amount } headers = { "Content-Type": "application/json" } async with aiohttp.ClientSession() as session: async with session.post(BINANCE_P2P_API_URL, json=data, headers=headers) as response: if response.status == 200: response_json = await response.json() offers_data = response_json.get('data', []) offers = [{ 'price': to_float(adv.get('price')), 'min_amount': to_float(adv.get('minSingleTransAmount')), 'max_amount': to_float(adv.get('maxSingleTransAmount')) } for item in offers_data for adv in [item.get('adv', {})]] return offers else: raise Exception(f"Error fetching offers from Binance P2P: {response.status} - {await response.text()}") # Path: bot/binance_api.py def get_link(fiat: str, asset: str, payment_method: str, order_type: str): """ Get the link to the offers from Binance P2P. :param asset: Cryptocurrency asset, e.g., 'USDT', 'BTC'. :param fiat: Fiat currency, e.g., 'USD', 'EUR'. :param payment_method: payment type, default is "Wise". :param order_type: Order type, either 'Buy' or 'Sell'. :return: str, link to the offers from Binance P2P. """ url = f"https://p2p.binance.com/en/trade/{order_type}/{payment_method}/{asset}?fiat={fiat}" return url # Path: bot/utils.py def send_telegram_message(user_id, message): """ Send a message to a user from a Telegram bot. Parameters: user_id (str): Unique identifier for the target user or username of the target channel. message (str): Text of the message to be sent. Returns: dict: Response from the Telegram API. """ # Telegram API endpoint for sending messages send_message_url = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage" # Parameters for the API request params = { 'chat_id': user_id, 'text': message, 'parse_mode': 'HTML' } # Making the request to the Telegram API response = requests.post(send_message_url, params=params) # Returning the response as a Python dictionary return response.json() # Path: bot/alerts/alert.py from datetime import datetime, timedelta from bot.binance_api import get_offers, get_link from bot.utils import send_telegram_message class Alert: def __init__(self, alert_id, user_id, asset, fiat, trade_type, threshold_price, payment_method): self.alert_id = alert_id self.user_id = user_id self.asset = asset self.fiat = fiat self.trade_type = trade_type self.threshold_price = threshold_price self.payment_method = payment_method self.active = True self.last_triggered = None # Track when the alert was last triggered self.trigger_interval = 15 # in minutes
self.link = get_link(self.fiat, self.asset, self.payment_method, self.trade_type)
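The reference line caches the Binance P2P offer URL once at construction time via the `get_link` helper shown in the prompt. A runnable sketch of that pattern, with the `Alert` fields trimmed to the ones the call needs:

def get_link(fiat: str, asset: str, payment_method: str, order_type: str) -> str:
    # same shape as the helper in the prompt
    return f"https://p2p.binance.com/en/trade/{order_type}/{payment_method}/{asset}?fiat={fiat}"

class Alert:
    def __init__(self, fiat: str, asset: str, payment_method: str, trade_type: str):
        self.fiat, self.asset = fiat, asset
        self.payment_method, self.trade_type = payment_method, trade_type
        # the predicted line: cache the offer URL once at construction time
        self.link = get_link(self.fiat, self.asset, self.payment_method, self.trade_type)

print(Alert("USD", "USDT", "Wise", "Buy").link)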
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: timlrx/simple-ai-agents # Path: simple_ai_agents/models.py class ChatMessage(BaseModel): role: str content: str name: Optional[str] = None function_call: Optional[str] = None received_at: datetime.datetime = Field(default_factory=now_tz) finish_reason: Optional[str] = None prompt_length: Optional[int] = None completion_length: Optional[int] = None total_length: Optional[int] = None def __str__(self) -> str: return str(self.model_dump_json(exclude_none=True)) # Path: simple_ai_agents/models.py class ChatSession(BaseModel): id: Union[str, UUID] = Field(default_factory=uuid4) created_at: datetime.datetime = Field(default_factory=now_tz) system: str params: Dict[str, Any] = {} messages: List[ChatMessage] = [] input_fields: Set[str] = set() recent_messages: Optional[int] = None save_messages: Optional[bool] = True total_prompt_length: int = 0 total_completion_length: int = 0 total_length: int = 0 title: Optional[str] = None def __str__(self) -> str: sess_start_str = self.created_at.strftime("%Y-%m-%d %H:%M:%S") if self.messages: last_message_str = self.messages[-1].received_at.strftime( "%Y-%m-%d %H:%M:%S" ) else: last_message_str = "N/A" return f"""Chat session started at {sess_start_str}: - {len(self.messages):,} Messages - Last message sent at {last_message_str}""" def add_messages( self, user_message: ChatMessage, assistant_message: ChatMessage, save_messages: Optional[bool] = None, ) -> None: # if save_messages is explicitly defined, always use that choice # instead of the default to_save = isinstance(save_messages, bool) if to_save: if save_messages: self.messages.append(user_message) self.messages.append(assistant_message) elif self.save_messages: self.messages.append(user_message) self.messages.append(assistant_message) # Path: simple_ai_agents/models.py class LLMOptions(TypedDict, total=False): model: str functions: List function_call: str temperature: float top_p: float n: int stream: bool stop: str max_tokens: float presence_penalty: float frequency_penalty: float logit_bias: dict user: str deployment_id: str request_timeout: int api_base: str api_version: str api_key: str model_list: list # Path: simple_ai_agents/chat_session.py from json import JSONDecodeError from typing import Any, AsyncGenerator, Generator, Optional, Type, TypeVar from instructor.function_calls import Mode from instructor.patch import handle_response_model, process_response from litellm import ModelResponse, acompletion, completion from pydantic import BaseModel, ValidationError from simple_ai_agents.models import ChatMessage, ChatSession, LLMOptions import litellm litellm.telemetry = False litellm.add_function_to_prompt = True # add function to prompt for non openai models litellm.drop_params = True # drop params if unsupported by provider litellm.suppress_debug_info = True T = TypeVar("T", bound=BaseModel)
class ChatLLMSession(ChatSession):
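The reference line subclasses the pydantic `ChatSession` model so the LLM-facing wrapper inherits its stored state. A reduced sketch, assuming pydantic is installed; `prepare_request` is a hypothetical method added purely for illustration:

from pydantic import BaseModel

class ChatSession(BaseModel):
    system: str = "You are a helpful assistant."
    messages: list = []

class ChatLLMSession(ChatSession):
    # the subclass layers provider-facing behaviour on the stored state
    def prepare_request(self, prompt: str) -> list:
        return [{"role": "system", "content": self.system},
                {"role": "user", "content": prompt}]

print(ChatLLMSession().prepare_request("hello"))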
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: DIAGNijmegen/HoVer-UNet # Path: models/HoVerNet/utils.py def remove_small_objects(pred, min_size=64, connectivity=1): """Remove connected components smaller than the specified size. This function is taken from skimage.morphology.remove_small_objects, but the warning is removed when a single label is provided. Args: pred: input labelled array min_size: minimum size of instance in output array connectivity: The connectivity defining the neighborhood of a pixel. Returns: out: output array with instances removed under min_size """ out = pred if min_size == 0: # shortcut for efficiency return out if out.dtype == bool: selem = ndimage.generate_binary_structure(pred.ndim, connectivity) ccs = np.zeros_like(pred, dtype=np.int32) ndimage.label(pred, selem, output=ccs) else: ccs = out try: component_sizes = np.bincount(ccs.ravel()) except ValueError: raise ValueError( "Negative value labels are not supported. Try " "relabeling the input with `scipy.ndimage.label` or " "`skimage.morphology.label`." ) too_small = component_sizes < min_size too_small_mask = too_small[ccs] out[too_small_mask] = 0 return out # Path: models/HoVerNet/utils.py def get_bounding_box(img): """Get bounding box coordinate information.""" rows = np.any(img, axis=1) cols = np.any(img, axis=0) rmin, rmax = np.where(rows)[0][[0, -1]] cmin, cmax = np.where(cols)[0][[0, -1]] # due to python indexing, need to add 1 to max # else accessing will be 1px in the box, not out rmax += 1 cmax += 1 return [rmin, rmax, cmin, cmax] # Path: models/HoVerNet/post_proc.py import warnings import cv2 import numpy as np from scipy.ndimage import measurements from scipy.ndimage.morphology import ( binary_fill_holes, ) from skimage.segmentation import watershed from models.HoVerNet.utils import remove_small_objects, get_bounding_box def noop(*args, **kargs): pass warnings.warn = noop #### def __proc_np_hv(pred): """Process Nuclei Prediction with XY Coordinate Map. 
Args: pred: prediction output, assuming channel 0 contain probability map of nuclei channel 1 containing the regressed X-map channel 2 containing the regressed Y-map """ pred = np.array(pred, dtype=np.float32) blb_raw = pred[..., 0] h_dir_raw = pred[..., 1] v_dir_raw = pred[..., 2] # processing blb = np.array(blb_raw >= 0.5, dtype=np.int32) blb = measurements.label(blb)[0] blb = remove_small_objects(blb, min_size=10) blb[blb > 0] = 1 # background is 0 already h_dir = cv2.normalize( h_dir_raw, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F ) v_dir = cv2.normalize( v_dir_raw, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F ) sobelh = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=21) sobelv = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=21) sobelh = 1 - ( cv2.normalize( sobelh, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F ) ) sobelv = 1 - ( cv2.normalize( sobelv, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F ) ) overall = np.maximum(sobelh, sobelv) overall = overall - (1 - blb) overall[overall < 0] = 0 dist = (1.0 - overall) * blb ## nuclei values form mountains so inverse to get basins dist = -cv2.GaussianBlur(dist, (3, 3), 0) overall = np.array(overall >= 0.4, dtype=np.int32) marker = blb - overall marker[marker < 0] = 0 marker = binary_fill_holes(marker).astype("uint8") kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel) marker = measurements.label(marker)[0] marker = remove_small_objects(marker, min_size=10) proced_pred = watershed(dist, markers=marker, mask=blb) return proced_pred #### def process(pred_map, nr_types=None, return_centroids=False): """Post processing script for image tiles. Args: pred_map: commbined output of tp, np and hv branches, in the same order nr_types: number of types considered at output of nc branch overlaid_img: img to overlay the predicted instances upon, `None` means no type_colour (dict) : `None` to use random, else overlay instances of a type to colour in the dict output_dtype: data type of output Returns: pred_inst: pixel-wise nuclear instance segmentation prediction pred_type_out: pixel-wise nuclear type prediction """ if nr_types is not None: pred_type = pred_map[..., :1] pred_inst = pred_map[..., 1:] pred_type = pred_type.astype(np.int32) else: pred_inst = pred_map pred_inst = np.squeeze(pred_inst) pred_inst = __proc_np_hv(pred_inst) inst_info_dict = None if return_centroids or nr_types is not None: inst_id_list = np.unique(pred_inst)[1:] # exlcude background inst_info_dict = {} for inst_id in inst_id_list: inst_map = pred_inst == inst_id # TODO: chane format of bbox output
rmin, rmax, cmin, cmax = get_bounding_box(inst_map)
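The reference line computes a per-instance bounding box from a boolean mask using the `get_bounding_box` helper reproduced from the prompt. A self-contained NumPy demo of the loop:

import numpy as np

def get_bounding_box(img):
    rows = np.any(img, axis=1)
    cols = np.any(img, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    return [rmin, rmax + 1, cmin, cmax + 1]  # +1 for exclusive upper bounds

pred_inst = np.zeros((6, 6), dtype=np.int32)
pred_inst[1:3, 1:4] = 1
pred_inst[4:6, 4:6] = 2
for inst_id in np.unique(pred_inst)[1:]:  # exclude background label 0
    inst_map = pred_inst == inst_id
    rmin, rmax, cmin, cmax = get_bounding_box(inst_map)  # the predicted line
    print(inst_id, (rmin, rmax, cmin, cmax))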
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: fofr/cog-sdxl-lcm-multi-controlnet-lora # Path: controlnet_preprocess.py class ControlNetPreprocessor: ANNOTATOR_NAMES = [ "none", "edge_canny", "depth_leres", "depth_midas", "soft_edge_pidi", "soft_edge_hed", "lineart", "lineart_anime", "openpose", # "straight_edge_mlsd", # "face_detector", # "content_shuffle", # "normal_bae", # "segementation_sam", ] def __init__(self, predictor): WeightsDownloader.download_if_not_exists( CONTROLNET_PREPROCESSOR_URL, CONTROLNET_PREPROCESSOR_MODEL_CACHE ) self.annotators = { "edge_canny": CannyDetector(), "depth_leres": self.initialize_detector(LeresDetector), "depth_midas": self.initialize_detector(MidasDetector), "soft_edge_pidi": self.initialize_detector(PidiNetDetector), "soft_edge_hed": self.initialize_detector(HEDdetector), "lineart": self.initialize_detector(LineartDetector), "lineart_anime": self.initialize_detector(LineartAnimeDetector), "openpose": self.initialize_detector(OpenposeDetector), # "straight_edge_mlsd": self.initialize_detector(MLSDdetector), # "face_detector": MediapipeFaceDetector(), # "content_shuffle": ContentShuffleDetector(), # "normal_bae": self.initialize_detector(NormalBaeDetector), # "segementation_sam": self.initialize_detector( # SamDetector, # model_name="ybelkada/segment-anything", # subfolder="checkpoints", # ), } torch.device("cuda") @staticmethod def get_annotator_names(): return ControlNetPreprocessor.ANNOTATOR_NAMES def initialize_detector( self, detector_class, model_name="lllyasviel/Annotators", **kwargs ): print(f"Initializing {detector_class.__name__}") return detector_class.from_pretrained( model_name, cache_dir=CONTROLNET_PREPROCESSOR_MODEL_CACHE, **kwargs, ) def annotators_list(self): return list(self.annotators.keys()) def process_image(self, image, annotator): print(f"Processing image with {annotator}") return self.annotators[annotator](image) # Path: weights_downloader.py class WeightsDownloader: @staticmethod def download_if_not_exists(url, dest): if not os.path.exists(dest): WeightsDownloader.download(url, dest) @staticmethod def download(url, dest): start = time.time() print("downloading url: ", url) print("downloading to: ", dest) subprocess.check_call(["pget", "-x", url, dest], close_fds=False) print("downloading took: ", time.time() - start) # Path: controlnet.py import torch from diffusers import ControlNetModel from controlnet_preprocess import ControlNetPreprocessor from weights_downloader import WeightsDownloader CONTROLNET_MODEL_CACHE = "./controlnet-cache" CONTROLNET_URL = "https://weights.replicate.delivery/default/controlnet/sdxl-cn-canny-depth-softe-pose-qr.tar" class ControlNet: CONTROLNET_MODELS = [ "none", "edge_canny", "illusion", "depth_leres", "depth_midas", "soft_edge_pidi", "soft_edge_hed", "lineart", "lineart_anime", "openpose", # Preprocessors without an XL model yet # "straight_edge_mlsd", # "face_detector", # "content_shuffle", # "normal_bae", # "segementation_sam", ] def __init__(self, predictor):
WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE)
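The reference line reuses the download-if-missing idiom from `WeightsDownloader`. A sketch of the idempotent pattern; `pget` is the external downloader the repo shells out to, and the URL below is a placeholder, not the real weights location:

import os
import subprocess

CONTROLNET_MODEL_CACHE = "./controlnet-cache"
CONTROLNET_URL = "https://example.com/weights.tar"  # placeholder URL

class WeightsDownloader:
    @staticmethod
    def download_if_not_exists(url: str, dest: str) -> None:
        # idempotent: skip the slow fetch when the cache path already exists
        if not os.path.exists(dest):
            subprocess.check_call(["pget", "-x", url, dest])

# usage, exactly the shape of the predicted line:
# WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE)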
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: joyn-gg/discord.http # Path: discord_http/emoji.py class PartialEmoji: def __init__(self, emoji: str): self._original_name: str = emoji self.id: Optional[int] = None self.animated: bool = False self.discord_emoji: bool = False is_custom: Optional[re.Match] = utils.re_emoji.search(emoji) if is_custom: _animated, _name, _id = is_custom.groups() self.discord_emoji = True self.animated = bool(_animated) self.name: str = _name self.id = int(_id) else: self.name: str = emoji def __repr__(self) -> str: if self.discord_emoji: return f"<PartialEmoji name='{self.name}' id={self.id} animated={self.animated}>" return f"<PartialEmoji name='{self.name}'>" def __str__(self) -> str: return self._original_name def __int__(self) -> Optional[int]: if self.discord_emoji: return self.id return None @property def url(self) -> Optional[str]: """ `str`: Returns the URL of the emoji if it's a Discord emoji """ if self.discord_emoji: return f"{Asset.BASE}/emojis/{self.id}.{'gif' if self.animated else 'png'}" return None def to_dict(self) -> dict: """ `dict`: Returns a dict representation of the emoji """ if self.discord_emoji: # Include animated if it's a Discord emoji return {"id": self.id, "name": self.name, "animated": self.animated} return {"name": self.name, "id": None} def to_reaction(self) -> str: """ `str`: Returns a string representation of the emoji """ if self.discord_emoji: return f"{self.name}:{self.id}" return self.name # Path: discord_http/enums.py class ButtonStyles(Enum): primary = 1 secondary = 2 success = 3 danger = 4 link = 5 blurple = 1 grey = 2 gray = 2 green = 3 red = 4 url = 5 # Path: discord_http/enums.py class ComponentType(Enum): action_row = 1 button = 2 string_select = 3 text_input = 4 user_select = 5 role_select = 6 mentionable_select = 7 channel_select = 8 # Path: discord_http/enums.py class TextStyles(Enum): short = 1 paragraph = 2 # Path: discord_http/enums.py class ChannelType(Enum): guild_text = 0 dm = 1 guild_voice = 2 group_dm = 3 guild_category = 4 guild_news = 5 guild_store = 6 guild_news_thread = 10 guild_public_thread = 11 guild_private_thread = 12 guild_stage_voice = 13 guild_directory = 14 guild_forum = 15 # Path: discord_http/view.py import asyncio import inspect import logging import secrets import time from typing import Union, Optional, TYPE_CHECKING, Callable from .emoji import PartialEmoji from .enums import ButtonStyles, ComponentType, TextStyles, ChannelType from . 
import Snowflake from .channel import BaseChannel from .context import Context from .message import Message from .response import BaseResponse if TYPE_CHECKING: _log = logging.getLogger(__name__) __all__ = ( "Button", "ChannelSelect", "Item", "Link", "MentionableSelect", "Modal", "RoleSelect", "Select", "UserSelect", "View", ) def _garbage_id() -> str: """ `str`: Returns a random ID to satisfy Discord API """ return secrets.token_hex(16) class Item: def __init__(self, *, type: int, row: Optional[int] = None): self.row: Optional[int] = row self.type: int = type def __repr__(self) -> str: return f"<Item type={self.type} row={self.row}>" def to_dict(self) -> dict: """ `dict`: Returns a dict representation of the item """ raise NotImplementedError("to_dict not implemented") class Button(Item): def __init__( self, *, label: Optional[str] = None, style: Union[ButtonStyles, str, int] = ButtonStyles.primary, disabled: bool = False, row: Optional[int] = None, custom_id: Optional[str] = None, emoji: Optional[Union[str, dict]] = None, url: Optional[str] = None ):
super().__init__(type=int(ComponentType.button), row=row)
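The reference line forwards shared fields to the `Item` base class, coercing the component enum to its wire integer. In the sketch below `ComponentType` is reduced to an `IntEnum` so that `int(...)` works in plain Python; the repo presumably supplies its own integer coercion on its `Enum` base:

from enum import IntEnum
from typing import Optional

class ComponentType(IntEnum):  # IntEnum so int(...) works in plain Python
    action_row = 1
    button = 2

class Item:
    def __init__(self, *, type: int, row: Optional[int] = None):
        self.type = type
        self.row = row

class Button(Item):
    def __init__(self, *, label: Optional[str] = None, row: Optional[int] = None):
        # the predicted line: hand the shared fields to the base class
        super().__init__(type=int(ComponentType.button), row=row)
        self.label = label

print(Button(label="ok").type)  # 2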
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: catid/aiwebcam2 # Path: utils.py class ColoredFormatter(logging.Formatter): def format(self, record): def setup_colored_logging(level=logging.INFO): # Path: service_asr.py class ASRServiceRunner: def __init__(self): self.lock = asyncio.Lock() self.command_queue = Queue() self.response_queue = Queue() self.service_process = Process( target=run_loop, args=(self.command_queue, self.response_queue)) self.service_process.start() def close(self): logger.info("Stopping ASR service...") self.command_queue.put(('stop',)) self.service_process.join() self.command_queue.close() self.response_queue.close() logger.info("ASR service stopped.") async def Transcribe(self, pcm_data_array, channels, sample_rate): async with self.lock: self.command_queue.put(('transcribe', pcm_data_array, channels, sample_rate)) return await asyncio.get_running_loop().run_in_executor(None, self.response_queue.get) # Path: service_llm.py class LLMServiceRunner: def __init__(self): self.lock = asyncio.Lock() self.command_queue = Queue() self.response_queue = Queue() self.service_process = Process( target=run_loop, args=(self.command_queue, self.response_queue)) self.service_process.start() def close(self): logger.info("Stopping LLM service...") self.command_queue.put(('stop',)) self.service_process.join() self.command_queue.close() self.response_queue.close() logger.info("LLM service stopped.") async def VisionCompletionBegin(self, prompt_messages): async with self.lock: self.command_queue.put(('vision_completion', prompt_messages)) async def TextCompletionBegin(self, prompt_messages): async with self.lock: self.command_queue.put(('text_completion', prompt_messages)) # Returns None on final one async def CompletionPoll(self): return await asyncio.get_running_loop().run_in_executor(None, self.response_queue.get) # Path: service_tts.py class TTSServiceRunner: def __init__(self): self.lock = asyncio.Lock() self.command_queue = multiprocessing.Queue() self.response_queue = multiprocessing.Queue() self.service_process = multiprocessing.Process( target=run_loop, args=(self.command_queue, self.response_queue)) self.service_process.start() self.silence_duration = 0.02 self.next_pts = 0 def close(self): logger.info("Stopping background TTS worker...") self.command_queue.put(('stop',)) self.service_process.join() logger.info("Closing command_queue...") self.command_queue.close() logger.info("Closing response_queue...") self.response_queue.close() logger.info("Stopped background TTS worker.") def generate_silence_packet(self, duration_seconds): chunk = bytes.fromhex('f8 ff fe') packet = av.packet.Packet(chunk) packet.pts = self.next_pts packet.dts = self.next_pts packet.time_base = time_base_fraction pts_count = round(duration_seconds * time_base) self.next_pts += pts_count #logger.info(f"silence pts_count = {pts_count}") return packet # Grab either the next TTS Opus packet to play back, # or a silence packet if no data is available. 
def poll_packet(self): try: duration, pts_count, chunk = self.response_queue.get_nowait() packet = av.packet.Packet(chunk) packet.pts = self.next_pts packet.dts = self.next_pts packet.time_base = time_base_fraction self.next_pts += pts_count return packet, duration except: pass # Ignore Empty exception return self.generate_silence_packet(self.silence_duration), self.silence_duration async def Speak(self, text): async with self.lock: self.command_queue.put(('speak', text)) # Path: app.py from utils import logger from service_asr import ASRServiceRunner from service_llm import LLMServiceRunner from service_tts import TTSServiceRunner from aiortc import RTCIceCandidate, RTCSessionDescription, RTCPeerConnection from aiortc.mediastreams import AudioStreamTrack, VideoStreamTrack, MediaStreamError, MediaStreamTrack from queue import Queue from fractions import Fraction from PIL import Image from zoneinfo import ZoneInfo import socketio import asyncio import re import io import base64 import numpy as np import time, datetime import aiohttp.web import asyncio import ssl import argparse # Logging sio = socketio.AsyncServer(cors_allowed_origins='*') # Background services asr_runner = ASRServiceRunner() llm_runner = LLMServiceRunner() # WebRTC peer listening for a single browser to connect # We run each WebRTC peer in a separate process to avoid stalls in playback # WebRTC Connection class VideoReceiver(VideoStreamTrack): kind = "video" def __init__(self, track): super().__init__() # Initialize the MediaStreamTrack self.track = track self.recording = False self.recorded_frame = None def startRecording(self): self.recording = True self.recorded_frame = None def endRecording(self): self.recording = False image = self.recorded_frame self.recorded_frame = None return image async def recv(self): frame = await self.track.recv() # Process the frame (e.g., save to a file, play audio, etc.) if self.recording: if not self.recorded_frame: self.recorded_frame = frame return frame class CustomAudioStream(MediaStreamTrack): kind = "audio" def __init__(self): super().__init__() # don't forget this! self.tts = TTSServiceRunner() self.stream_time = None async def close(self): super().stop() self.tts.close() async def recv(self): packet, duration = self.tts.poll_packet() #logger.info(f"opus duration={duration} pts={packet.pts}") if self.stream_time is None: self.stream_time = time.time() wait = self.stream_time - time.time() await asyncio.sleep(wait) self.stream_time += duration return packet class WebRTCConnection: def __init__(self, sid): self.sid = sid self.pc = RTCPeerConnection() self.video_track = None self.processing_audio = False self.recording = False self.opus_track = CustomAudioStream() @self.pc.on("connectionstatechange") async def on_connectionstatechange():
logger.info(f"self.pc.connectionState = {self.pc.connectionState}")
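The reference line logs the new connection state from inside a decorator-registered event handler. `FakePeerConnection` below is a hypothetical stand-in for `aiortc.RTCPeerConnection`, and the handler is synchronous here for brevity:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class FakePeerConnection:
    """Hypothetical stand-in for aiortc.RTCPeerConnection."""
    def __init__(self):
        self.connectionState = "new"
        self._handlers = {}

    def on(self, event):
        def register(fn):
            self._handlers[event] = fn
            return fn
        return register

    def fire(self, event):
        self._handlers[event]()

pc = FakePeerConnection()

@pc.on("connectionstatechange")
def on_connectionstatechange():
    # the predicted line: log the state transition inside the handler
    logger.info(f"pc.connectionState = {pc.connectionState}")

pc.connectionState = "connected"
pc.fire("connectionstatechange")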
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: chziakas/backbone-learn # Path: backbone_learn/backbone/backbone_decision_tree.py class BackboneDecisionTree(BackboneSupervised): """ Specific implementation of the Backbone method for sparse regression. This class combines Pearson correlation for feature screening, L0BnB for exact solving, and Lasso for heuristic solving to construct a sparse regression model. Inherits from: BackboneBase (ABC): The abstract base class for backbone algorithms. """ def set_solvers( self, alpha=0.5, depth=3, time_limit=1000, _lambda=0.5, num_threads=None, obj_mode="acc", n_bins=2, is_data_fit=False, ): """ Initializes the sparse regression method with specified components. Args: alpha (float): Proportion of features to retain after screening. Defaults to 0.5. depth (int, optional): Depth of BendersOCT tree. Defaults to 3. time_limit (int): Time limit for the optimization process. _lambda (float): Regularization parameter. num_threads (int or None): Number of threads for parallel processing. obj_mode (str): Objective mode, e.g., 'acc' for accuracy. n_bins (int): Number of bins for KBinsDiscretizer. Defaults to 2. is_data_fit (bool): Whether data are in the format required for OCT """ self.screen_selector = PearsonCorrelationSelector(alpha) self.exact_solver = BendersOCTDecisionTree( depth=depth, time_limit=time_limit, _lambda=_lambda, num_threads=num_threads, obj_mode=obj_mode, n_bins=n_bins, is_data_fit=is_data_fit, ) self.heuristic_solver = CARTDecisionTree() # Path: backbone_learn/heuristic_solvers/cart_decision_tree.py class CARTDecisionTree(HeuristicSolverBase): """ Implements a Classification And Regression Tree (CART) Decision Tree with cross-validation using AUC. This solver is a heuristic approach for fitting a decision tree model and identifying significant features. Attributes: _model (DecisionTreeClassifier): An instance of the sklearn DecisionTreeClassifier. _auc_score (float): The maximum AUC score obtained during cross-validation. """ def __init__(self, **kwargs): """ Initializes the CARTDecisionTree with a DecisionTreeClassifier model. """ self._model = DecisionTreeClassifier() self._auc_score = None @property def auc_score(self) -> float: """ Returns the maximum AUC score obtained from cross-validation. Returns: float: The maximum AUC score. """ return self._auc_score def fit(self, X: np.ndarray, y: np.ndarray, cv_folds: int = 5, random_state: int = 0) -> None: """ Fits a CART Decision Tree model to the data using hyperparameter tuning with cross-validation and evaluates it using AUC. Args: X (np.ndarray): The input features as a NumPy array. y (np.ndarray): The target labels as a NumPy array. cv_folds (int): The number of folds to use for cross-validation. 
""" self._model.set_params(random_state=random_state) # Define the parameter grid for hyperparameter tuning param_grid = {"max_depth": [None, 5, 10, 20], "min_samples_leaf": [1, 2, 4]} # Initialize GridSearchCV with the model and parameter grid grid_search = GridSearchCV( self._model, param_grid, cv=cv_folds, scoring="roc_auc", verbose=1 ) # Perform the grid search on the provided data grid_search.fit(X, y) # Update the model with the best found parameters self._model = grid_search.best_estimator_ # Store the best AUC score self._auc_score = grid_search.best_score_ def get_relevant_variables(self, threshold: float) -> np.ndarray: """ Identifies features with importance greater than a specified threshold. Args: threshold (float): The threshold for determining feature relevance. Returns: np.ndarray: An array of indices of relevant features. """ significant_indices = np.where(self._model.feature_importances_ > threshold)[0] return significant_indices def predict(self, X: np.ndarray) -> np.ndarray: """ Predicts the target labels for the given data. Args: X (np.ndarray): The input features as a NumPy array. Returns: np.ndarray: The predicted target labels. """ return self._model.predict(X) # Path: experiments/benchmark_decision_tree.py import time from itertools import product from sklearn.datasets import make_classification from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder from utils import save_results from backbone_learn.backbone.backbone_decision_tree import BackboneDecisionTree from backbone_learn.heuristic_solvers.cart_decision_tree import CARTDecisionTree # Define parameter ranges for Backbone parameters alpha_range = [0.1, 0.5] beta_range = [0.5, 0.9] num_subproblems_range = [5, 10] num_iterations_range = [1] # Define parameter ranges for FlowOCT parameters depth_range = [2] _lambda_range = [0.5] # Define dataset parameters n_informative = 4 n_bins = 5 n_features_range = [20] n_samples = 500 n_classes = 2 random_state = 17 time_limit = 3600 log_filename = "decision_tree_results.json" results = [] # Experiment loop for n_features in n_features_range: # Generate synthetic classification data X, y = make_classification( n_samples=n_samples, n_informative=n_informative, n_features=n_features, n_classes=n_classes, random_state=random_state, ) # Convert features to binary est_X = KBinsDiscretizer( n_bins=n_bins, encode="ordinal", strategy="quantile", random_state=random_state ) est_X.fit(X) X_bin = est_X.transform(X) enc = OneHotEncoder(handle_unknown="error", drop="if_binary") X_cat_enc = enc.fit_transform(X_bin).toarray() # Split data into train and test sets X_train, X_test, y_train, y_test = train_test_split( X_cat_enc, y, test_size=0.2, random_state=random_state ) for depth in depth_range: # CARTDecisionTree model iteration for heuristic_model
heuristic_model = CARTDecisionTree(max_depth=depth)
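The reference line instantiates one heuristic solver per depth in the benchmark sweep. A sketch, assuming scikit-learn is available; note this wrapper forwards keyword arguments to `DecisionTreeClassifier`, whereas the constructor shown in the prompt accepts `**kwargs` without using them:

from sklearn.tree import DecisionTreeClassifier

class CARTDecisionTree:
    def __init__(self, **kwargs):
        # forward hyper-parameters (e.g. max_depth) to the sklearn model
        self._model = DecisionTreeClassifier(**kwargs)

depth_range = [2]
for depth in depth_range:
    # the predicted line: one heuristic solver per depth in the sweep
    heuristic_model = CARTDecisionTree(max_depth=depth)
    print(heuristic_model._model.get_params()["max_depth"])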
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: openclimatefix/Open-Source-Quartz-Solar-Forecast # Path: quartz_solar_forecast/eval/pv.py def get_pv_truth(testset: pd.DataFrame): print('Loading PV data') # download from hugginface or load from cache cache_dir = "data/pv" metadata_file = f"{cache_dir}/pv.netcdf" if not os.path.exists(metadata_file): print('Loading from HF)') os.makedirs(cache_dir, exist_ok=True) fs.get("datasets/openclimatefix/uk_pv/pv.netcdf", metadata_file) # Load in the dataset pv_ds = xr.open_dataset(metadata_file, engine="h5netcdf") combined_data = [] for index, row in testset.iterrows(): print(f'Processing {index} of {len(testset)}') pv_id = str(row["pv_id"]) base_datetime = pd.to_datetime(row["timestamp"]) # Calculate future timestamps up to the max horizon for i in range(0, 49): # 48 hours in steps of 1 hour future_datetime = base_datetime + pd.DateOffset(hours=i) horizon = i # horizon in hours try: # Attempt to select data for the future datetime selected_data = pv_ds[pv_id].sel(datetime=future_datetime) value = selected_data.values.item() value = value / 1000 # to convert from w to kw except KeyError: # If data is not found for the future datetime, set value as NaN value = np.nan # Add the data to the DataFrame combined_data.append(pd.DataFrame( {"pv_id": pv_id, "timestamp": future_datetime, "value": value, "horizon_hour": horizon}, index=[i]) ) combined_data = pd.concat(combined_data) return combined_data # Path: quartz_solar_forecast/eval/pv.py def get_pv_metadata(testset: pd.DataFrame): # download from hugginface or load from cache cache_dir = "data/pv" metadata_file = f"{cache_dir}/metadata.csv" if not os.path.exists(metadata_file): os.makedirs(cache_dir, exist_ok=True) fs.get("datasets/openclimatefix/uk_pv/metadata.csv", metadata_file) # Load in the dataset metadata_df = pd.read_csv(metadata_file) # join metadata with testset metadata_df = metadata_df.rename(columns={"ss_id": "pv_id"}) combined_data = testset.merge(metadata_df, on="pv_id", how="left") # only keep the columns we need combined_data = combined_data[ ["pv_id", "timestamp", "latitude_rounded", "longitude_rounded", "kwp"] ] # rename latitude_rounded to latitude and longitude_rounded to longitude combined_data = combined_data.rename( columns={ "latitude_rounded": "latitude", "longitude_rounded": "longitude", "kwp": "capacity", } ) # format datetime combined_data["timestamp"] = pd.to_datetime(combined_data["timestamp"]) return combined_data # Path: tests/eval/test_pv.py from quartz_solar_forecast.eval.pv import get_pv_truth, get_pv_metadata import pandas as pd def test_get_pv_metadata(): test_set_df = pd.DataFrame( [ { "timestamp": pd.Timestamp("2021-01-26 01:15:00"), "pv_id": 8215, } ] )
metadata_df = get_pv_metadata(test_set_df)
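The reference line calls the function under test with the single-row DataFrame built above. A self-contained pandas sketch of the same join; the metadata values here are illustrative, not real PV site data:

import pandas as pd

def get_pv_metadata(testset: pd.DataFrame) -> pd.DataFrame:
    # reduced stand-in: join test rows onto a tiny in-memory metadata table
    metadata = pd.DataFrame(
        [{"pv_id": 8215, "latitude": 52.0, "longitude": -1.5, "capacity": 3.0}]
    )
    return testset.merge(metadata, on="pv_id", how="left")

test_set_df = pd.DataFrame(
    [{"timestamp": pd.Timestamp("2021-01-26 01:15:00"), "pv_id": 8215}]
)
metadata_df = get_pv_metadata(test_set_df)  # the predicted line
print(metadata_df)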
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: newcastleuniversity/DISPEL # Path: dispel/providers/generic/tasks/sbt_utt/const.py MIN_MOTION_DUR = 1 # Path: dispel/signal/core.py def signal_duration(data: Union[pd.Series, pd.DataFrame]) -> float: """Get signal duration from time-based indices. Parameters ---------- data The signal of which we want to compute the duration based on its index. The index has to be either a TimedeltaIndex or DatetimeIndex. Returns ------- float The duration of the signal (in seconds) from the index. """ assert isinstance(data.index, (pd.TimedeltaIndex, pd.DatetimeIndex)) return (data.index.max() - data.index.min()).total_seconds() # Path: dispel/signal/geometric.py def extract_ellipse_axes(comps: pd.DataFrame) -> Tuple[float, float]: """Extract length of the axes of an ellipse covering 95-percentile of data. Parameters ---------- comps A pd.DataFrame with a 2-dimensional timeseries Returns ------- Union[int, int] Tuple containing: major_axis : float The length of the major axis of an ellipse minor_axis : float The length of the minor axis of an ellipse """ # Extract PCA components of the 2-dimensional planar timeseries pca = PCA(n_components=2) pca = pca.fit(comps) # Transform distribution to canonical cartesian axes data_transformed = pca.transform(comps) data_transformed_df = pd.DataFrame(data_transformed, columns=["ap", "ml"]) # Compute the min and max boundaries of 95% of data covered by the ellipse ml_min = np.quantile(data_transformed_df.ml, 0.05) ml_max = np.quantile(data_transformed_df.ml, 0.95) ap_min = np.quantile(data_transformed_df.ap, 0.05) ap_max = np.quantile(data_transformed_df.ap, 0.95) # Compute the range of each axes (i.e., ml and ap) rang_ml = abs(ml_max - ml_min) rang_ap = abs(ap_max - ap_min) # Select the minor and major axes major_axis = max([rang_ml, rang_ap]) minor_axis = min([rang_ml, rang_ap]) return major_axis, minor_axis # Path: dispel/signal/vectorial.py def mean_norm_planar(comp1: pd.Series, comp2: pd.Series) -> float: """Compute the mean norm of a 2-dimensional timeseries. The mean norm of a 2-dimensional timeseries is referred to as the Average Acceleration Amplitude eq. A2 of Martinez(2012) https://doi.org/10.1080/10255842.2011.565753 Parameters ---------- comp1 The first component of the signal comp2 The second component of the signal Returns ------- float The average value of the norm of a 2 dimensional timeseries """ return resultant_norm_planar(comp1, comp2).mean() # Path: dispel/signal/vectorial.py def resultant_norm_planar(comp1: pd.Series, comp2: pd.Series) -> pd.Series: """Compute the norm of the resultant of a 2-dimensional vector on a plane. The norm of the resultant of 2-components represents the magnitude of a 2-dimensional vector. Parameters ---------- comp1 The first component of the signal comp2 The second component of the signal Returns ------- pd.Series A series comprising the norm values of the resultant of 2-dimensional vectorial timeseries """ return np.sqrt(comp1**2 + comp2**2) # Path: dispel/signal/vectorial.py def rms_planar(comp1: pd.Series, comp2: pd.Series) -> float: """Compute the RMS of a 2-dimensional timeseries. The Root-Mean-Square of a 2-dimensional timeseries as presented in eq. 
A4 of Martinez(2012) https://doi.org/10.1080/10255842.2011.565753 Parameters ---------- comp1 The first component of the signal comp2 The second component of the signal Returns ------- float The RMS value of a 2-dimensional timeseries """ return np.sqrt(np.mean(comp1**2 + comp2**2)) # Path: dispel/providers/generic/tasks/sbt_utt/sbt_func.py import numpy as np import pandas as pd from dispel.providers.generic.tasks.sbt_utt.const import MIN_MOTION_DUR from dispel.signal.core import signal_duration from dispel.signal.geometric import extract_ellipse_axes from dispel.signal.vectorial import mean_norm_planar, resultant_norm_planar, rms_planar """Functionality implemented in SBT.steps module.""" def label_bouts(data: pd.Series) -> pd.Series: """Label each valid and invalid chunk as a bout. Parameters ---------- data A Series that contains one column including the flag continuous signal Returns ------- Series A labelled pd.Series where each valid/invalid bout is assigned an increasing integer number """ # We increase a counter number everytime the flag changes (solution # inspired in StakOverflow community return data.astype(bool).diff().fillna(method="bfill").cumsum() def reject_short_bouts(bout_mask: pd.Series, flag: pd.Series) -> pd.Series: """Reject bouts whose duration is less than MIN_MOTION_DUR seconds. Parameters ---------- bout_mask A Series containing a flag_signal and a bout_number. flag A Series containing a flag_signal and a bout_number. Returns ------- Series A Series with a flag_signal where the valence has been inverted in case its duration is below MIN_MOTION_DUR seconds. """ flag = flag.astype(bool) for _, bout in bout_mask.groupby(bout_mask):
if signal_duration(bout) < MIN_MOTION_DUR:
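The reference line rejects motion bouts shorter than `MIN_MOTION_DUR` seconds. A runnable pandas sketch; the run-labelling below uses the `ne/shift/cumsum` idiom as a close stand-in for the `label_bouts` helper in the prompt:

import pandas as pd

MIN_MOTION_DUR = 1  # seconds, as in the const module above

def signal_duration(data) -> float:
    return (data.index.max() - data.index.min()).total_seconds()

idx = pd.timedelta_range(start="0s", periods=10, freq="200ms")
flag = pd.Series([1, 1, 0, 0, 0, 0, 0, 1, 1, 1], index=idx)
bout_id = flag.ne(flag.shift()).cumsum()  # label each run of equal values
for _, bout in flag.groupby(bout_id):
    if signal_duration(bout) < MIN_MOTION_DUR:  # the predicted line
        print(f"rejecting bout of {signal_duration(bout):.1f} s")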
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: runDMCA/home-assistant-mazda # Path: custom_components/mazda/pymazda/sensordata/android_builds.py class AndroidBuilds: # noqa: D101 def __init__(self): # noqa: D107 self.builds = None def get_builds(self): # noqa: D102 if self.builds is None: self.builds = json.loads(ANDROID_BUILDS_JSON) return self.builds # Path: custom_components/mazda/pymazda/sensordata/sensor_data_util.py def percent_encode(str): # noqa: D100, D103 if str is None: return "" result_str = "" for char in str.encode(): if ( char >= 33 and char <= 0x7E and char != 34 and char != 37 and char != 39 and char != 44 and char != 92 ): result_str += chr(char) else: result_str += "%" result_str += format(char, "x").upper() return result_str # Path: custom_components/mazda/pymazda/sensordata/sensor_data_util.py def sum_char_codes(str): # noqa: D103 sum = 0 for char in str.encode(): if char < 0x80: sum += char return sum # Path: custom_components/mazda/pymazda/sensordata/system_info.py import random # noqa: D100 import secrets from .android_builds import AndroidBuilds from .sensor_data_util import percent_encode, sum_char_codes SCREEN_SIZES = [[1280, 720], [1920, 1080], [2560, 1440]] ANDROID_VERSION_TO_SDK_VERSION = { "11": 30, "10": 29, "9": 28, "8.1.0": 27, "8.0.0": 26, "7.1": 25, "7.0": 24, } class SystemInfo: # noqa: D101 def __init__(self): # noqa: D107 self.android_builds = AndroidBuilds() def randomize(self): # noqa: D102 device_model, device = random.choice( list(self.android_builds.get_builds().items()) ) codename = device["codename"] build = random.choice(device["builds"]) build_version_incremental = random.randrange(1000000, 9999999) self.screen_height, self.screen_width = random.choice(SCREEN_SIZES) self.battery_charging = random.randrange(0, 10) <= 1 self.battery_level = random.randrange(10, 90) self.orientation = 1 self.language = "en" self.android_version = build["version"] self.rotation_lock = "1" if random.randrange(0, 10) > 1 else "0" self.build_model = device_model self.build_bootloader = str(random.randrange(1000000, 9999999)) self.build_hardware = codename self.package_name = "com.interrait.mymazda" self.android_id = secrets.token_bytes(8).hex() self.keyboard = 0 self.adb_enabled = False self.build_version_codename = "REL" self.build_version_incremental = build_version_incremental self.build_version_sdk = ANDROID_VERSION_TO_SDK_VERSION.get(build["version"]) self.build_manufacturer = "Google" self.build_product = codename self.build_tags = "release-keys" self.build_type = "user" self.build_user = "android-build" self.build_display = build["buildId"] self.build_board = codename self.build_brand = "google" self.build_device = codename self.build_fingerprint = f"google/{codename}/{codename}:{build['version']}/{build['buildId']}/{build_version_incremental}:user/release-keys" self.build_host = f"abfarm-{random.randrange(10000, 99999)}" self.build_id = build["buildId"] def to_string(self): # noqa: D102 return ",".join( [ "-1", "uaend", "-1", str(self.screen_height), str(self.screen_width), ("1" if self.battery_charging else "0"), str(self.battery_level), str(self.orientation), percent_encode(self.language), percent_encode(self.android_version), self.rotation_lock, 
percent_encode(self.build_model), percent_encode(self.build_bootloader), percent_encode(self.build_hardware), "-1", self.package_name, "-1", "-1", self.android_id, "-1", str(self.keyboard), "1" if self.adb_enabled else "0", percent_encode(self.build_version_codename), percent_encode(str(self.build_version_incremental)), str(self.build_version_sdk), percent_encode(self.build_manufacturer), percent_encode(self.build_product), percent_encode(self.build_tags), percent_encode(self.build_type), percent_encode(self.build_user), percent_encode(self.build_display), percent_encode(self.build_board), percent_encode(self.build_brand), percent_encode(self.build_device), percent_encode(self.build_fingerprint), percent_encode(self.build_host), percent_encode(self.build_id), ] ) def get_char_code_sum(self): # noqa: D102
return sum_char_codes(self.to_string())
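The reference line computes a checksum over the serialized fingerprint by delegating to `sum_char_codes`. A tiny runnable demo of that helper on a shortened fingerprint string:

def sum_char_codes(s: str) -> int:
    # sum only single-byte (ASCII) code points, as in the helper above
    return sum(ch for ch in s.encode() if ch < 0x80)

fingerprint = ",".join(["-1", "uaend", "-1", "1080", "1920"])
print(sum_char_codes(fingerprint))  # the checksum the predicted line returns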
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: uysalserkan/url-shorter # Path: models/urls.py class URLS(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) long_url: str = Field(nullable=False) generated_url: str = Field(nullable=True) created_date: int = datetime.utcnow().timestamp() expire_date: int = Field(nullable=False) @classmethod def generate_randoms(cls): """Docstring.""" characters = string.ascii_letters + string.digits return ''.join(random.choice(characters) for _ in range(10)) # Path: controller/url_c.py class URLController: """Universal URL Controller.""" @classmethod def get(cls, url_id): """Get URL object with id field.""" try: url_obj = DB_engine.get(statement=select(URLS).where(URLS.id == url_id), first=True) return url_obj except Exception as exc: print("ERROR:", exc) @classmethod def delete(cls, url_id): """Delete a url with id field.""" try: url_obj = cls.get(url_id=url_id) status = DB_engine.delete(obj=url_obj, batch=False) if not status: raise Exception("Did not delete.") except Exception as exc: print("ERROR:", exc) # Path: config.py # Path: engines.py class DatabaseEngine(Singlethon): """Database Engine.""" def __init__(self): sql_file_path = os.path.join(settings.DATABASE.FOLDER_PATH, settings.DATABASE.NAME) sqlite_url = f"sqlite:///{sql_file_path}" self.engine = create_engine(sqlite_url, echo=False) def get(self, statement, first: bool): """Get elements of the sql statement.""" with Session(self.engine) as sess: results = sess.exec( statement=statement ).all() return results[0] if first else results def add(self, obj, batch: bool = False): """Add object.""" if not batch: with Session(self.engine) as sess: sess.add(obj) sess.commit() sess.refresh(obj) def delete(self, obj, batch: bool = False) -> bool: """Delete object.""" if not batch: with Session(self.engine) as sess: if not obj: return False sess.delete(obj) sess.commit() return True # Path: engines.py class MinIOEngine(Singlethon): """MinIO Engine""" def __init__(self): self.client = Minio( endpoint=settings.BUCKET.MINIO_SERVER, access_key=secrets.development.MINIO_USERNAME, secret_key=secrets.development.MINIO_PASSWORD, secure=False ) self.bucket_name = settings.BUCKET.MINIO_BUCKET def add(self, file: UploadFile, short_name: str): try: _ = self.put_object( bucket_name=self.bucket_name, object_name=short_name, data=file.file, length=file.size, metadata={ 'filename': file.filename, 'content_type': file.content_type, 'headers': file.headers, 'size': file.size } ) except Exception as exc: print("Error:", exc) def get(self, short_name: str): try: return self.client.get_object( bucket_name=self.bucket_name, object_name=short_name ) except Exception as exc: print("Error", exc) def create_bucket(self): try: if not self.client.bucket_exists(self.bucket_name): self.client.make_bucket(self.bucket_name) except Exception as exc: print(exc) def delete(self, short_url): try: self.client.remove_object( bucket_name=self.bucket_name, object_name=short_url ) except Exception as exc: print(exc) # Path: app.py import multiprocessing import time from datetime import datetime, timedelta from fastapi import FastAPI, Response, UploadFile, Request from fastapi.responses import RedirectResponse, JSONResponse from sqlmodel 
import select from prometheus_fastapi_instrumentator import Instrumentator from models.urls import URLS from controller.url_c import URLController from config import settings, secrets from engines import DatabaseEngine, MinIOEngine from validators import url_validation """URL Shorter API.""" app = FastAPI( title="URL Shorter Service", description="Short your long url links.", ) Instrumentator().instrument(app).expose(app)
DB_engine = DatabaseEngine()
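The reference line creates the shared database engine at module import time; combined with the repo's `Singlethon` base, repeated construction yields the same object. A minimal singleton sketch (`Singleton` and the `url` field are illustrative names):

class Singleton:
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

class DatabaseEngine(Singleton):
    def __init__(self):
        self.url = "sqlite:///urls.db"  # illustrative connection string

DB_engine = DatabaseEngine()  # the predicted line: one shared engine at import
assert DB_engine is DatabaseEngine()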
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: logicalroot/gpt-4v-demos # Path: utils.py def show_code(code): """Showing the code of the demo.""" show_code = st.sidebar.checkbox("Show code", False) if show_code: st.markdown("## Code") for function in code: # Showing the code of the demo. sourcelines, _ = inspect.getsourcelines(function) st.code(textwrap.dedent("".join(sourcelines[0:]))) # Path: parsers.py def extract_json(string): """ This function extracts the first valid JSON object from a given string. Parameters: string (str): The string from which to extract the JSON object. Returns: obj: The first valid JSON object found in the string. Raises: ValueError: If no valid JSON object is found in the string. """ start_positions = [pos for pos, char in enumerate(string) if char == "{"] end_positions = [pos for pos, char in enumerate(string) if char == "}"] for start in start_positions: for end in reversed(end_positions): if start < end: try: obj = json.loads(string[start : end + 1]) return json.dumps(obj, indent=4, ensure_ascii=False) except JSONDecodeError: continue return "{}" # Path: pages/3_📋_Quality_Control.py import streamlit as st import base64 import requests import json import components from utils import show_code from parsers import extract_json def submit(image, api_key, issue_attributes): headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"} base64_image = base64.b64encode(image).decode("utf-8") payload = { "model": "gpt-4-vision-preview", "messages": [ { "role": "system", "content": "You are an expert quality control inspector for leading manufacturers.", }, { "role": "user", "content": [ { "type": "text", "text": ( "Inspect this image and write a report in the following format:\n\n" "```json\n" "{\n" ' "issues": [\n' " {\n" f"{issue_attributes}\n" " }\n" " ]\n" "}\n" "```\n\n" "If you see any signs of quality deterioration of any kind, such as corrosion, " "physical damage, decay, or contamination, add them as separate issues in the " "`issues` array. If there are no issues, the `issues` array should be empty. " "Your response should contain only valid JSON." ), }, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}, }, ], }, ], "max_tokens": 1024, "temperature": 0.1, # Response format not yet supported by GPT-4V # "response_format": {"type": "json_object"}, } try: response = requests.post( "https://api.openai.com/v1/chat/completions", headers=headers, json=payload ) response.raise_for_status()
text = extract_json(response.json()["choices"][0]["message"]["content"])
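The reference line pulls the model's text out of the chat-completions payload and runs it through `extract_json`. A self-contained demo with a reduced copy of that parser and a response dict fabricated for illustration:

import json

def extract_json(string: str) -> str:
    # reduced copy of the parser above: first balanced {...} that parses
    starts = [i for i, c in enumerate(string) if c == "{"]
    ends = [i for i, c in enumerate(string) if c == "}"]
    for s in starts:
        for e in reversed(ends):
            if s < e:
                try:
                    return json.dumps(json.loads(string[s:e + 1]), indent=4)
                except json.JSONDecodeError:
                    continue
    return "{}"

fake_response = {"choices": [{"message": {"content": 'Report: {"issues": []} end'}}]}
text = extract_json(fake_response["choices"][0]["message"]["content"])
print(text)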
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: intel/llm-on-ray # Path: inference/api_openai_backend/query_client.py class RouterQueryClient(): def __init__(self, serve_deployments): self.serve_deployments = serve_deployments async def query(self, model: str, prompt: Prompt, request_id: str): response_stream = self.stream( model, prompt, request_id, ) responses = [resp async for resp in response_stream] return ModelResponse.merge_stream(*responses) async def stream( self, model: str, prompt: Prompt, request_id: str ): if model in self.serve_deployments: deploy_handle = self.serve_deployments[model] else: raise HTTPException(404, f"Could not find model with id {model}") prompt_content = prompt.prompt request_config = prompt.parameters temperature = request_config.get("temperature", 1.0) top_p = request_config.get("top_p", 1.0) max_new_tokens = request_config.get("max_tokens", None) gen_config = { "max_new_tokens": max_new_tokens, "temperature": temperature, "top_p": top_p, } if temperature != 1.0 or top_p != 1.0: gen_config.update({"do_sample": True}) async for x in handle_request( model=model, prompt=prompt, request_id=request_id, async_iterator=deploy_handle.options(stream=True).stream_response.options(stream=True, use_new_handle_api=True).remote(prompt_content, gen_config) ): yield x async def model(self, model_id: str) -> ModelCard: """Get configurations for a supported model""" return ModelCard( id=model_id, root=model_id, ) async def models(self) -> Dict[str, ModelCard]: """Get configurations for supported models""" metadatas = {} for model_id in self.serve_deployments: metadatas[model_id] = await self.model(model_id) return metadatas # Path: inference/api_openai_backend/router_app.py TIMEOUT = float(os.environ.get("ROUTER_HTTP_TIMEOUT", 600)) def init() -> FastAPI: async def _completions_wrapper( completion_id: str, body: CompletionRequest, response: Response, generator: AsyncGenerator[ModelResponse, None], ) -> AsyncGenerator[str, None]: async def _chat_completions_wrapper( completion_id: str, body: ChatCompletionRequest, response: Response, generator: AsyncGenerator[ModelResponse, None], ) -> AsyncGenerator[str, None]: def __init__( self, query_client: RouterQueryClient, ) -> None: async def models(self) -> ModelList: async def model_data(self, model: str) -> ModelCard: async def completions( self, body: CompletionRequest, response: FastAPIResponse, ): async def chat( self, body: ChatCompletionRequest, response: FastAPIResponse, ): async def health_check(self) -> bool: class Router: # Path: inference/api_server_openai.py import os from ray import serve from inference.api_openai_backend.query_client import RouterQueryClient from inference.api_openai_backend.router_app import Router, router_app # # Copyright 2023 The LLM-on-Ray Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # =========================================================================== # # This file is adapted from # https://github.com/ray-project/ray-llm/blob/b3560aa55dadf6978f0de0a6f8f91002a5d2bed1/aviary/backend/server/run.py # Copyright 2023 Anyscale # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def router_application(deployments): """Create a Router Deployment. Router Deployment will point to a Serve Deployment for each specified base model, and have a client to query each one. """ merged_client = RouterQueryClient(deployments) RouterDeployment = serve.deployment( route_prefix="/", autoscaling_config={ "min_replicas": int(os.environ.get("ROUTER_MIN_REPLICAS", 2)), "initial_replicas": int(os.environ.get("ROUTER_INITIAL_REPLICAS", 2)), "max_replicas": int(os.environ.get("ROUTER_MAX_REPLICAS", 16)), "target_num_ongoing_requests_per_replica": int( os.environ.get("ROUTER_TARGET_NUM_ONGOING_REQUESTS_PER_REPLICA", 200) ), }, max_concurrent_queries=1000, # Maximum backlog for a single replica
)(serve.ingress(router_app)(Router))
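The reference line closes an unusual but valid construct: Ray Serve decorator factories applied as plain function calls instead of `@`-syntax. This sketch replaces `serve.deployment` and `serve.ingress` with hypothetical stand-ins so it runs without Ray:

def deployment(**options):
    # stand-in for serve.deployment(...): a configurable class decorator
    def wrap(cls):
        cls.deploy_options = options
        return cls
    return wrap

def ingress(app):
    # stand-in for serve.ingress(app): another class decorator
    def wrap(cls):
        cls.app = app
        return cls
    return wrap

class Router:
    pass

router_app = object()  # placeholder for the FastAPI app
# the predicted line closes the same call chain: both factories applied
# as ordinary functions rather than with @-syntax
RouterDeployment = deployment(max_concurrent_queries=1000)(ingress(router_app)(Router))
print(RouterDeployment.deploy_options)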
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: carlhampuswall/smartknob_ha # Path: custom_components/smartknob/const.py DATA_REGISTRY = f"{DOMAIN}_storage" # Path: custom_components/smartknob/const.py SAVE_DELAY = 10 # Path: custom_components/smartknob/const.py STORAGE_KEY = f"{DOMAIN}.storage" # Path: custom_components/smartknob/logger.py _LOGGER = logging.getLogger(__name__) # Path: custom_components/smartknob/store.py from collections import OrderedDict from collections.abc import MutableMapping from typing import Dict, cast from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.storage import Store from homeassistant.loader import bind_hass from .const import DATA_REGISTRY, SAVE_DELAY, STORAGE_KEY from .logger import _LOGGER import attr @attr.s(slots=True, frozen=True) class AppEntry: """App storage entry.""" app_id = attr.ib(type=str, default=None) app_slug_id = attr.ib(type=str, default=None) entity_id = attr.ib(type=str, default=None) friendly_name = attr.ib(type=str, default=None) @attr.s(slots=True, frozen=True) class SmartknobConfig: """Smartknob device configuration, storage entry.""" mac_address = attr.ib(type=str, default=None) apps = attr.ib(type=list[AppEntry], default=None) class SmartknobStorage: """Class to hold Smartknob storage.""" def __init__(self, hass: HomeAssistant) -> None: """Initialize the Smartknob storage.""" self.hass = hass self.config: MutableMapping[ str, str ] = {} #! ADD SMARTKNOB DEVICE SPECIFIC CONFIG HERE self.knobs: MutableMapping[str, SmartknobConfig] = {} self._store = Store(hass, 1, STORAGE_KEY) async def async_load(self) -> None: """Load the registry of Smartknob.""" data = await self._store.async_load() knobs: "OrderedDict[str, AppEntry]" = OrderedDict() if data is None: return if "knobs" in data: for knob in data["knobs"]: apps = [ AppEntry( app_id=app["app_id"], app_slug_id=app["app_slug_id"], entity_id=app["entity_id"], friendly_name=app["friendly_name"], ) for (app) in knob["apps"] ] knobs[knob["mac_address"]] = SmartknobConfig( mac_address=knob["mac_address"], apps=apps ) self.knobs = knobs # TODO ADD CHECK IF NO APPS # if not apps: # await self.async_factory_default() @callback def async_schedule_save(self) -> None: """Schedule saving the registry of alarmo.""" self._store.async_delay_save(self._data_to_save, SAVE_DELAY) async def async_save(self) -> None: """Save the registry of Smartknob.""" await self._store.async_save(self._data_to_save()) @callback def _data_to_save(self) -> dict: store_data = {"knobs": [attr.asdict(entry) for entry in self.knobs.values()]} # EXAMPLE OF ADDING MORE DATA TO STORE # store_data["apps"] = [attr.asdict(entry) for entry in self.areas.values()] return store_data async def async_delete(self): """Delete all registry data."""
_LOGGER.warning("Removing Smartknob configuration data!")
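The reference line emits a warning before a destructive operation, a common pattern for irreversible storage resets. A runnable sketch with the storage class pared down to the relevant method:

import asyncio
import logging

logging.basicConfig(level=logging.WARNING)
_LOGGER = logging.getLogger(__name__)

class SmartknobStorage:
    def __init__(self):
        self.knobs = {"aa:bb": object()}

    async def async_delete(self):
        """Delete all registry data."""
        _LOGGER.warning("Removing Smartknob configuration data!")  # the predicted line
        self.knobs = {}

asyncio.run(SmartknobStorage().async_delete())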
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: chuzhumin98/LLM_Eval # Path: PRE/data.py class DataLoader: ''' The loader to load for evaluated task, with given prompt template to generate a series of prompts feeding for each LLM ''' def __init__(self, args): self.path_data = args['path_data'] # the load path for the data self.format = args['format'] # the data format, csv (need a title line) or json (each line is a single data item) self.path_prompt = args['path_prompt'] if 'path_prompt' in args else None # the path of prompt template. In the prompt template, using {{key}} for the replacement of the key. For example, in the prompt "You need answer a question: {{question}}", the "question" field need to be included in the data if not os.path.exists(self.path_data): raise FileExistsError("Load task data failed: file not exist!") assert self.format in ['csv', 'json'] def generate_reader(self): if self.format == 'csv': with open(self.path_data, encoding='utf-8') as f: gen = csv.DictReader(f, skipinitialspace=True) elif self.format == 'json': gen = open(self.path_data, encoding='utf-8') else: raise Exception("Invalid data format") return gen def get_prompt(self): if self.path_prompt is None: raise Exception("Exception: missing argument path_prompt") if not os.path.exists(self.path_prompt): raise FileExistsError("Load task prompt template failed: file not exist!") self.template_prompt = open(self.path_prompt, encoding='utf-8').read().strip() gen = self.generate_reader() for row in gen: if self.format == 'json': item = json.loads(row.strip()) else: item = row prompt = self.template_prompt for key in item: prompt = prompt.replace("{{" + key + "}}", item[key]) yield prompt # a generator to return each prompt def get_task_items(self): data_list = [] gen = self.generate_reader() for row in gen: if self.format == 'json': item = json.loads(row.strip()) elif self.format == 'csv': item = dict(row) data_list.append(item) return data_list # Path: PRE/api.py class Auto_API: @staticmethod def instantiate_api(api_type, args) -> LLM_API: for at, _API in API_type2class_list: if api_type == at: return _API(args) raise Exception(f"Invalid api_type: {api_type}") # Path: PRE/utils.py def parse_response(response, parse_type, nominal_list=None, nominal_ticks=None): ''' parse_type: int, float or str if parse_type = str, then required parameter nominal_list and nominal_ticks nominal_list: a series of nominal types, its name nomianl_ticks: the corresponding nominal number (int) ''' assert parse_type in ['int', 'float', 'str'] if parse_type == 'int': nums = re.findall(r"-?\d+", response) if len(nums) == 0: return None return int(nums[0]) elif parse_type == 'float': nums = re.findall(r"-?\d+\.?\d*", response) if len(nums) == 0: return None return int(nums[0]) elif parse_type == 'str': appear_pos, cur_idx = math.inf, -1 response = response.lower() for idx, label in enumerate(nominal_list): pos = response.find(label.lower()) if pos != -1: # really appear! if pos < appear_pos: appear_pos, cur_idx = pos, idx if cur_idx == -1: return None else: return nominal_ticks[cur_idx] # Path: PRE/eval.py import os import yaml import warnings import json import copy import sys import numpy as np from PRE.data import DataLoader from PRE.api import Auto_API from PRE.utils import parse_response ''' The implement of the peer review and result aggregation module ''' base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(base_dir) class PEER_REVIEW: ''' Conduct peer review, process for one prompt (pairwise or pointwise) ''' def __init__(self, args) -> None: self.parser_type = args['parser_type'] # int, float, str self.task_name = args['task_name'] self.save_dir = args['save_dir'] if self.parser_type == 'str': self.nominal_list = [nn.strip() for nn in args['nominal_list'].split(',')] self.nominal_ticks = [int(nn.strip()) for nn in args['nominal_ticks'].split(',')] else: self.nominal_list, self.nominal_ticks = None, None def peer_review_single_round(self, reviewers, prompts): ''' used in gaming sampling strategy reviewers: LLM config list prompts: an array, each item is a dict with key "prompt" return a dict to denote the results of each evaluate task under all the reviews, key: reviewer model name, value: the original response of this reviewer '''
apis_reviewer = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in reviewers]
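The predicted line instantiates one API client per reviewer config through the Auto_API factory. A minimal sketch of that factory-plus-comprehension pattern, with a simplified stand-in registry (EchoAPI and the config keys are illustrative, not from the repo):

class EchoAPI:
    def __init__(self, args):
        self.model_name = args.get("model_name", "echo")

API_type2class_list = [("echo", EchoAPI)]

class Auto_API:
    @staticmethod
    def instantiate_api(api_type, args):
        for at, _API in API_type2class_list:
            if api_type == at:
                return _API(args)
        raise Exception(f"Invalid api_type: {api_type}")

reviewers = [{"api_type": "echo", "model_name": "model-a"},
             {"api_type": "echo", "model_name": "model-b"}]
apis_reviewer = [Auto_API.instantiate_api(cfg["api_type"], cfg) for cfg in reviewers]
assert [a.model_name for a in apis_reviewer] == ["model-a", "model-b"]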
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: tahaafarooq/werkzeug-hash-cracker # Path: simplifiers/simplifier.py class SimplifierSingle(object): def __init__(self, hasho, wordlist): self.hasho = hasho self.wordlist = wordlist def crack_single_hash(self): with open(self.wordlist, "r", encoding="latin-1") as wordlist_file: for word in wordlist_file: words = word.strip().split() for line in words: check_hash = check_password_hash(self.hasho, line) if check_hash: print(f"Hash: {self.hasho} Has Password {line}") exit(0) else: continue # Path: simplifiers/simplifier.py class SimplifierFile(object): def __init__(self, hash_file, wordlist): self.hash_file = hash_file self.wordlist = wordlist self.hashes = {} self.hashes_cracked = {} def interprete_hash_file(self): with open(self.hash_file, "r", encoding="latin-1") as hashs: for hasho in hashs: words = hasho.strip().split() for line in words: self.hashes[line] = True return "Saved The Hashes" def crack_hash_file(self): with open(self.hash_file, "r") as hasho: hasho = hasho.read().split() with open(self.wordlist, "r", encoding="latin-1") as wordlist_file: raw_words = wordlist_file.read().split() words = Queue() for word in raw_words: words.put(word) while not words.empty(): for i in range(0, len(hasho)): password = words.get() if check_password_hash(hasho[i], password): print(f"Hash: {hasho[i]} Has Password {password}") break else: continue break exit(0) def check_results(self): if self.hashes_cracked is not None: return self.hashes_cracked # Path: cracker.py import argparse from simplifiers.simplifier import SimplifierSingle, SimplifierFile if __name__ == "__main__": parser = argparse.ArgumentParser(description="Werkzeug Security Hash Cracker :: @tahaafarooq") parser.add_argument('--single', nargs=2, metavar=('hash', 'wordlist'), help='Crack a single hash string') parser.add_argument('--file', nargs=2, metavar=('hashfile', 'wordlist'), help='Crack a file with multiple hashes') parser.add_argument('--about', action='store_true', help='Print core information about the script and developer') args = parser.parse_args() if args.about: about = """ Werkzeug Hash Cracker: Is a minimal script that cracks hashes which are generated from werkzeug.security library in python\n About Developer: Tahaa Farooq is a cybersecurity professional with a passion in programming. Check his github for more information (https://github.com/tahaafarooq)""" print(about) elif args.single: hash_string, wordlist_file = args.single simple_crack = SimplifierSingle(hash_string, wordlist_file) simple_crack.crack_single_hash() elif args.file: hash_file, wordlist_file = args.file
simple_crack = SimplifierFile(hash_file, wordlist_file)
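The completion mirrors the --single branch one level up: unpack the two nargs=2 values, then construct the matching handler. A compact sketch of that argparse dispatch shape (FileCracker is a placeholder for the repo's SimplifierFile):

import argparse

class FileCracker:  # placeholder for SimplifierFile
    def __init__(self, hash_file, wordlist):
        self.hash_file, self.wordlist = hash_file, wordlist

    def crack_hash_file(self):
        print(f"cracking {self.hash_file} against {self.wordlist}")

parser = argparse.ArgumentParser()
parser.add_argument("--file", nargs=2, metavar=("hashfile", "wordlist"))
args = parser.parse_args(["--file", "hashes.txt", "wordlist.txt"])
if args.file:
    hash_file, wordlist_file = args.file
    simple_crack = FileCracker(hash_file, wordlist_file)
    simple_crack.crack_hash_file()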
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: victor0089/AirBnB_clone_v2 # Path: models/base_model.py class BaseModel: def __init__(self, *args, **kwargs): def __str__(self): def __repr__(self): def save(self): def to_dict(self): def delete(self): # Path: models/state.py class State(BaseModel, Base): """This is the class for State Attributes: name: input name """ __tablename__ = "states" name = Column(String(128), nullable=False) cities = relationship("City", cascade='all, delete, delete-orphan', backref="state") @property def cities(self): var = models.storage.all() lista = [] result = [] for key in var: city = key.replace('.', ' ') city = shlex.split(city) if (city[0] == 'City'): lista.append(var[key]) for elem in lista: if (elem.state_id == self.id): result.append(elem) return (result) # Path: models/city.py class City(BaseModel, Base): """This is the class for City Attributes: state_id: The state id name: input name """ __tablename__ = "cities" name = Column(String(128), nullable=False) state_id = Column(String(60), ForeignKey('states.id'), nullable=False) places = relationship("Place", cascade='all, delete, delete-orphan', backref="cities") # Path: models/user.py class User(BaseModel, Base): """This is the class for user Attributes: email: email address password: password for you login first_name: first name last_name: last name """ __tablename__ = "users" email = Column(String(128), nullable=False) password = Column(String(128), nullable=False) first_name = Column(String(128)) last_name = Column(String(128)) places = relationship("Place", cascade='all, delete, delete-orphan', backref="user") reviews = relationship("Review", cascade='all, delete, delete-orphan', backref="user") # Path: models/place.py class Place(BaseModel, Base): """This is the class for Place Attributes: city_id: city id user_id: user id name: name input description: string of description number_rooms: number of room in int number_bathrooms: number of bathrooms in int max_guest: maximum guest in int price_by_night:: pice for a staying in int latitude: latitude in flaot longitude: longitude in float amenity_ids: list of Amenity ids """ __tablename__ = "places" city_id = Column(String(60), ForeignKey("cities.id"), nullable=False) user_id = Column(String(60), ForeignKey("users.id"), nullable=False) name = Column(String(128), nullable=False) description = Column(String(1024)) number_rooms = Column(Integer, nullable=False, default=0) number_bathrooms = Column(Integer, nullable=False, default=0) max_guest = Column(Integer, nullable=False, default=0) price_by_night = Column(Integer, nullable=False, default=0) latitude = Column(Float) longitude = Column(Float) amenity_ids = [] if getenv("HBNB_TYPE_STORAGE") == "db": reviews = relationship("Review", cascade='all, delete, delete-orphan', backref="place") amenities = relationship("Amenity", secondary=place_amenity, viewonly=False, back_populates="place_amenities") else: @property def reviews(self): """ Returns list of reviews.id """ var = models.storage.all() lista = [] result = [] for key in var: review = key.replace('.', ' ') review = shlex.split(review) if (review[0] == 'Review'): lista.append(var[key]) for elem in lista: if (elem.place_id == self.id): result.append(elem) return (result) @property def amenities(self): """ Returns list of amenity ids """ return self.amenity_ids @amenities.setter def amenities(self, obj=None): """ Appends amenity ids to the attribute """ if type(obj) is Amenity and obj.id not in self.amenity_ids: self.amenity_ids.append(obj.id) # Path: models/review.py class Review(BaseModel, Base): """This is the class for Review Attributes: place_id: place id user_id: user id text: review description """ __tablename__ = "reviews" text = Column(String(1024), nullable=False) place_id = Column(String(60), ForeignKey("places.id"), nullable=False) user_id = Column(String(60), ForeignKey("users.id"), nullable=False) # Path: models/amenity.py class Amenity(BaseModel, Base): """This is the class for Amenity Attributes: name: input name """ __tablename__ = "amenities" name = Column(String(128), nullable=False) place_amenities = relationship("Place", secondary=place_amenity) # Path: models/engine/db_storage.py from os import getenv from sqlalchemy.orm import sessionmaker, scoped_session from sqlalchemy import (create_engine) from sqlalchemy.ext.declarative import declarative_base from models.base_model import Base from models.state import State from models.city import City from models.user import User from models.place import Place from models.review import Review from models.amenity import Amenity #!/usr/bin/python3 """ new class for sqlAlchemy """ class DBStorage: """ create tables in environmental""" __engine = None __session = None def __init__(self): '''instantiate new dbstorage instance''' HBNB_MYSQL_USER = getenv('HBNB_MYSQL_USER') HBNB_MYSQL_PWD = getenv('HBNB_MYSQL_PWD') HBNB_MYSQL_HOST = getenv('HBNB_MYSQL_HOST') HBNB_MYSQL_DB = getenv('HBNB_MYSQL_DB') HBNB_ENV = getenv('HBNB_ENV') self.__engine = create_engine( 'mysql+mysqldb://{}:{}@{}/{}'.format( HBNB_MYSQL_USER, HBNB_MYSQL_PWD, HBNB_MYSQL_HOST, HBNB_MYSQL_DB ), pool_pre_ping=True) if HBNB_ENV == 'test':
Base.metadata.drop_all(self.__engine)
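The reference line is the standard SQLAlchemy teardown for a test environment: drop every table registered on the declarative Base. A self-contained sketch using in-memory SQLite so it runs without a MySQL server:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class State(Base):
    __tablename__ = "states"
    id = Column(Integer, primary_key=True)
    name = Column(String(128), nullable=False)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
Base.metadata.drop_all(engine)  # what the predicted line does when HBNB_ENV == 'test'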
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: believethehype/nostrdvm # Path: nostr_dvm/tasks/textgeneration_llmlite.py class TextGenerationLLMLite(DVMTaskInterface): KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT TASK: str = "text-to-text" FIX_COST: float = 0 dependencies = [("nostr-dvm", "nostr-dvm"), ("litellm", "litellm==1.12.3")] def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, admin_config: AdminConfig = None, options=None): dvm_config.SCRIPT = os.path.abspath(__file__) super().__init__(name, dvm_config, nip89config, admin_config, options) def is_input_supported(self, tags, client=None, dvm_config=None): for tag in tags: if tag.as_vec()[0] == 'i': input_value = tag.as_vec()[1] input_type = tag.as_vec()[2] if input_type != "text": return False return True def create_request_from_nostr_event(self, event, client=None, dvm_config=None): request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")} prompt = "" if self.options.get("default_model") and self.options.get("default_model") != "": model = self.options['default_model'] else: model = "gpt-3.5-turbo" # "gpt-4-1106-preview" # This will call chatgpt and requires an OpenAI API Key set in .env if self.options.get("server") and self.options.get("server") != "": server = self.options['server'] else: server = "http://localhost:11434" # default ollama server. This will only be used for ollama models. for tag in event.tags(): if tag.as_vec()[0] == 'i': input_type = tag.as_vec()[2] if input_type == "text": prompt = tag.as_vec()[1] options = { "prompt": prompt, "model": model, "server": server } request_form['options'] = json.dumps(options) return request_form def process(self, request_form): from litellm import completion options = DVMTaskInterface.set_options(request_form) try: if options["model"].startswith("ollama"): response = completion( model=options["model"], messages=[{"content": options["prompt"], "role": "user"}], api_base=options["server"], stream=False ) print(response.choices[0].message.content) return response.choices[0].message.content else: response = completion( model=options["model"], messages=[{"content": options["prompt"], "role": "user"}], ) print(response.choices[0].message.content) return response.choices[0].message.content except Exception as e: print("Error in Module: " + str(e)) raise Exception(e) # Path: nostr_dvm/utils/admin_utils.py class AdminConfig: REBROADCAST_NIP89: bool = False UPDATE_PROFILE: bool = False DELETE_NIP89: bool = False WHITELISTUSER: bool = False UNWHITELISTUSER: bool = False BLACKLISTUSER: bool = False DELETEUSER: bool = False LISTDATABASE: bool = False ClEANDB: bool = False USERNPUB: str = "" LUD16: str = "" EVENTID: str = "" PRIVKEY: str = "" # Path: nostr_dvm/utils/dvmconfig.py def build_default_config(identifier): dvm_config = DVMConfig() dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier) dvm_config.IDENTIFIER = identifier npub = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_bech32() invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys(identifier, npub) dvm_config.LNBITS_INVOICE_KEY = invoice_key dvm_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST") dvm_config.LN_ADDRESS = lnaddress return dvm_config # Path: nostr_dvm/utils/nip89_utils.py class NIP89Config: DTAG: str = "" NAME: str = "" KIND: int = None PK: str = "" CONTENT: str = "" # Path: nostr_dvm/utils/nip89_utils.py def check_and_set_d_tag(identifier, name, pk, imageurl): if not os.getenv("NIP89_DTAG_" + identifier.upper()): new_dtag = nip89_create_d_tag(name, Keys.from_sk_str(pk).public_key().to_hex(), imageurl) nip89_add_dtag_to_env_file("NIP89_DTAG_" + identifier.upper(), new_dtag) print("Some new dtag:" + new_dtag) return new_dtag else: return os.getenv("NIP89_DTAG_" + identifier.upper()) # Path: examples/ollama_dvm/main.py import json import dotenv from pathlib import Path from nostr_dvm.tasks.textgeneration_llmlite import TextGenerationLLMLite from nostr_dvm.utils.admin_utils import AdminConfig from nostr_dvm.utils.dvmconfig import build_default_config from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag def main(): identifier = "llama2" name = "Ollama"
dvm_config = build_default_config(identifier)
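build_default_config derives keys and LNbits credentials from the identifier and returns a filled DVMConfig, so the natural next line after naming the DVM is to call it. A reduced sketch of the same build-from-identifier idiom (the dataclass fields and the key derivation below are simplified placeholders, not the repo's real logic):

from dataclasses import dataclass

@dataclass
class DVMConfig:
    IDENTIFIER: str = ""
    PRIVATE_KEY: str = ""

def build_default_config(identifier: str) -> DVMConfig:
    cfg = DVMConfig()
    cfg.IDENTIFIER = identifier
    cfg.PRIVATE_KEY = f"sk-{identifier}"  # placeholder for check_and_set_private_key
    return cfg

dvm_config = build_default_config("llama2")
assert dvm_config.IDENTIFIER == "llama2"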
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: zouXH-god/meme_web # Path: meme_generator/config.py class MemeConfig(BaseModel): class ResourceConfig(BaseModel): class GifConfig(BaseModel): class TranslatorConfig(BaseModel): class ServerConfig(BaseModel): class LogConfig(BaseModel): class Config(BaseModel, extra=Extra.ignore): def load(cls) -> "Config": def dump(self): # Path: meme_generator/exception.py class NoSuchMeme(MemeGeneratorException): status_code: int = 531 def __init__(self, meme_key: str): self.meme_key = meme_key message = f'No such meme with key="{self.meme_key}"' super().__init__(message) # Path: meme_generator/log.py class LoguruHandler(logging.Handler): def emit(self, record: logging.LogRecord): def setup_logger(): def default_filter(record: "Record"): LOGGING_CONFIG = { "version": 1, "disable_existing_loggers": False, "handlers": { "default": { "class": "meme_generator.log.LoguruHandler", }, }, "loggers": { "uvicorn.error": {"handlers": ["default"], "level": "INFO"}, "uvicorn.access": { "handlers": ["default"], "level": "INFO", }, }, } # Path: meme_generator/meme.py class UserInfo(BaseModel): class MemeArgsModel(BaseModel): class MemeArgsParser(ArgumentParser): class MemeArgsType: class MemeParamsType: class Meme: def _print_message(self, message: str, file: Optional[IO[str]] = None): def exit(self, status: int = 0, message: Optional[str] = None): async def __call__( self, *, images: Union[List[str], List[Path], List[bytes], List[BytesIO]] = [], texts: List[str] = [], args: Dict[str, Any] = {}, ) -> BytesIO: def parse_args(self, args: List[str] = []) -> Dict[str, Any]: async def generate_preview(self, *, args: Dict[str, Any] = {}) -> BytesIO: async def _generate_preview(images: List[BytesIO], texts: List[str]): # Path: meme_generator/manager.py import importlib import importlib.util import pkgutil from pathlib import Path from typing import Dict, List, Optional, Union from .config import meme_config from .exception import NoSuchMeme from .log import logger from .meme import Meme, MemeArgsType, MemeFunction, MemeParamsType _memes: Dict[str, Meme] = {} def path_to_module_name(path: Path) -> str: rel_path = path.resolve().relative_to(Path.cwd().resolve()) if rel_path.stem == "__init__": return ".".join(rel_path.parts[:-1]) else: return ".".join(rel_path.parts[:-1] + (rel_path.stem,)) def load_meme(module_path: Union[str, Path]): module_name = ( path_to_module_name(module_path) if isinstance(module_path, Path) else module_path ) try: importlib.import_module(module_name) except Exception as e: logger.opt(colors=True, exception=e).error(f"Failed to import {module_path}!") def load_memes(dir_path: Union[str, Path]): if isinstance(dir_path, Path): dir_path = str(dir_path.resolve()) for module_info in pkgutil.iter_modules([dir_path]): if module_info.name.startswith("_"): continue if not ( module_spec := module_info.module_finder.find_spec(module_info.name, None) ): continue if not (module_path := module_spec.origin): continue if not (module_loader := module_spec.loader): continue try: module = importlib.util.module_from_spec(module_spec) module_loader.exec_module(module) except Exception as e: logger.opt(colors=True, exception=e).error( f"Failed to import {module_path}!" ) def add_meme( key: str,
function: MemeFunction,
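add_meme continues with the remaining registry parameters; the predicted line supplies the callable typed as MemeFunction. A minimal sketch of the key-to-callable registry, treating MemeFunction as a plain Callable alias (an assumption; the real alias lives in meme.py):

from typing import Callable, Dict

MemeFunction = Callable[..., bytes]  # assumed shape of the repo's alias
_memes: Dict[str, MemeFunction] = {}

def add_meme(key: str, function: MemeFunction) -> None:
    if key in _memes:
        raise ValueError(f"Duplicate meme key: {key}")
    _memes[key] = function

add_meme("demo", lambda: b"\x89PNG")
assert "demo" in _memes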
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: embrake/Aquilify # Path: aquilify/types.py T = typing.TypeVar("T") # Path: aquilify/responses.py class JsonResponse(BaseResponse): def __init__( self, content: Union[Dict, Callable, None] = {}, status: Optional[int] = 200, headers: Optional[Dict[str, Union[str, int]]] = None, content_type: str = 'application/json', encoding: Optional[str] = 'utf-8', validate: Optional[bool] = False, ) -> None: """ Create a JSON response. Args: content (Union[Dict, Callable, None]): The response content (as a dictionary). status (Optional[int]): The HTTP status code (default is 200). headers (Optional[Dict[str, Union[str, int]]]): Additional headers for the response. content_type (str): The content type for the response (default is 'application/json'). encoding (Optional[str]): The character encoding for JSON content (default is 'utf-8'). validate (Optional[bool]): Whether to validate the JSON data (default is False). """ if validate: try: json.dumps(content) except ValueError: raise ValueError("Invalid JSON content") super().__init__(json.dumps(content, ensure_ascii=False), status, headers) self.headers.setdefault('Content-Type', f'{content_type}; charset={encoding}') # Path: aquilify/middlewares/dispatcher.py import logging from typing import Awaitable, Callable, Dict, Optional, Union from ..types import ASGIApp, Receive, Scope, Send from ..responses import JsonResponse class Dispatcher: """ Dispatches incoming requests to different mounted ASGI apps based on the URL path. Usage: ```python # Create the main application main_app = Aquilify() # Create instances of the mounted apps app1 = Aquilify() app2 = Aquilify() # Create the Dispatcher instance dispatcher = Dispatcher(main_app, {}) # Map app1 to /app1 and app2 to /app2 dispatcher.map_url('/app1', app1) dispatcher.map_url('/app2', app2) # Define error handlers if necessary async def error_handler1(scope, receive, send, exc): # Custom error handling logic for app1 pass async def error_handler2(scope, receive, send, exc): # Custom error handling logic for app2 pass dispatcher.map_url('/app1', app1, error_handler1) dispatcher.map_url('/app2', app2, error_handler2) # Run the dispatcher @app.route("/") async def homepage(request): return JsonResponse({"message": "Hello, world!"}) @app.route("/app1") async def app1_homepage(request): return JSONResponse({"message": "App 1 homepage"}) @app.route("/app2") async def app2_homepage(request): return JSONResponse({"message": "App 2 homepage"}) ``` """ def __init__(self, main_app: ASGIApp, mounts: Dict[str, ASGIApp]) -> None: """ Initializes the Dispatcher instance. Args: main_app (ASGIApp): The main ASGI app to handle the requests. mounts (Dict[str, ASGIApp]): A dictionary containing mounted apps. Usage: ```python main_app = Aquilify() # create a main app instance app2 = Aquilify() #sub app for mounting in main_app dispatcher = Dispatcher(main_app, { '/app2': app2 }) Run: $ netix --debug main:dispatcher ---------- or ----------- $ uvicorn main:dispatcher """ self.main_app: ASGIApp = main_app self.mounts: Dict[str, ASGIApp] = mounts self.error_handlers: Dict[str, Optional[Callable[..., Awaitable[None]]]] = { mount_point: None for mount_point in mounts } self.logger = logging.getLogger(__name__) def map_url(self, mount_point: str, app: ASGIApp, error_handler: Optional[Callable[..., Awaitable[None]]] = None) -> None: """ Maps a URL mount point to a specified ASGI app. Args: mount_point (str): The URL mount point. app (ASGIApp): The ASGI app to mount at the specified point. error_handler (Optional[Callable[..., Awaitable[None]]]): Error handler for this mounted app. """ self.mounts[mount_point] = app self.error_handlers[mount_point] = error_handler def unmap_url(self, mount_point: str) -> None: """ Unmaps a URL mount point, removing the mounted app. Args: mount_point (str): The URL mount point to unmap. """ if mount_point in self.mounts: del self.mounts[mount_point] del self.error_handlers[mount_point] async def conditional_mount(self, mount_point: str, app: ASGIApp, condition: Union[Callable, Awaitable[bool]], error_handler: Optional[Callable[..., Awaitable[None]]] = None) -> None: """ Mounts an ASGI app based on a specified condition. Args: mount_point (str): The URL mount point. app (ASGIApp): The ASGI app to mount at the specified point. condition (Union[Callable, Awaitable[bool]]): Condition to decide the mounting. error_handler (Optional[Callable[..., Awaitable[None]]]): Error handler for this mounted app. """ if callable(condition): condition = await condition() if condition: self.mounts[mount_point] = app self.error_handlers[mount_point] = error_handler
async def dispatch(self, scope: Scope, receive: Receive, send: Send) -> None:
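The completion opens the dispatch coroutine that routes by URL path. A minimal working sketch of prefix-based ASGI dispatch under that assumption (the plain startswith matching here is a simplification and may differ from Aquilify's exact rule):

import asyncio

async def main_app(scope, receive, send):
    print("main app handled", scope["path"])

async def sub_app(scope, receive, send):
    print("sub app handled", scope["path"])

class Dispatcher:
    def __init__(self, main_app, mounts):
        self.main_app, self.mounts = main_app, mounts

    async def dispatch(self, scope, receive, send):
        # Route to the first mounted app whose prefix matches the path,
        # otherwise fall through to the main app.
        for prefix, app in self.mounts.items():
            if scope.get("path", "").startswith(prefix):
                return await app(scope, receive, send)
        return await self.main_app(scope, receive, send)

    async def __call__(self, scope, receive, send):  # ASGI entry point
        await self.dispatch(scope, receive, send)

dispatcher = Dispatcher(main_app, {"/app2": sub_app})
asyncio.run(dispatcher({"path": "/app2/home"}, None, None))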
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Viicos/django-autotyping # Path: src/django_autotyping/_compat.py def is_relative_to(path: Path, other: Path) -> bool: # Path: src/django_autotyping/typing.py class AutotypingSettingsDict(TypedDict, total=False): class StubsGenerationSettingsDict(TypedDict, total=False): class CodeGenerationSettingsDict(TypedDict, total=False): IGNORE: list[RulesT] STUBS_GENERATION: StubsGenerationSettingsDict CODE_GENERATION: CodeGenerationSettingsDict LOCAL_STUBS_DIR: Path | None SOURCE_STUBS_DIR: Path | None ALLOW_PLAIN_MODEL_REFERENCES: bool ALLOW_NONE_SET_TYPE: bool MODEL_FIELDS_OPTIONAL: bool ALLOW_REVERSE_ARGS: bool PROJECT_DIR: Path | None DIFF: bool TYPE_CHECKING_BLOCK: bool ASSUME_CLASS_GETITEM: bool # Path: src/django_autotyping/app_settings.py from copy import deepcopy from dataclasses import dataclass, field from pathlib import Path from django.conf import LazySettings from ._compat import Self from .typing import AutotypingSettingsDict, RulesT from __future__ import annotations @dataclass class CodeGenerationSettings: """Configuration for adding type annotations to Django user code.""" PROJECT_DIR: Path | None = None """The directory of the project, where code modifications should be applied.""" DIFF: bool = False """Show changes to be applied instead of modifying existing files.""" TYPE_CHECKING_BLOCK: bool = True """Whether newly added imports should be in an `if TYPE_CHECKING` block (avoids circular imports).""" ASSUME_CLASS_GETITEM: bool = False """Whether generic classes in stubs files but not at runtime should be assumed to have a `__class_getitem__` method. This can be achieved by using `django-stubs-ext` or manually. Affected rules: `DJA001`. """ @dataclass class StubsGenerationSettings: """Configuration for dynamic stubs generation.""" LOCAL_STUBS_DIR: Path | None = None """The directory of the local type stubs. If not set, this setting must be set as a CLI argument.""" SOURCE_STUBS_DIR: Path | None = None """The directory of the source `django-stubs` to be used. Will default to the first entry in site packages. """ ALLOW_PLAIN_MODEL_REFERENCES: bool = True """Whether string references in the form of `{model_name}` should be generated in overloads. If set to `True`, both `{model_name}` and `{model_name}.{app_label}` are allowed (unless the model name has a duplicate in a different app). Affected rules: `DJAS001`. """ ALLOW_NONE_SET_TYPE: bool = False """Whether to allow having the `__set__` type variable set to `None`, even if the field is not nullable. While Django allows setting most model instance fields to any value (before saving), it is generally a bad practice to do so. However, it might be beneficial to allow `None` to be set temporarly. This also works for foreign fields, where unlike standard fields, the Django descriptor used only allows model instances and `None` to be set. Affected rules: `DJAS001`. """ MODEL_FIELDS_OPTIONAL: bool = True """Whether all model fields should be considered optional when creating model instances. This affects the following signatures: - [`Manager.create/acreate`][django.db.models.Manager] - `__init__` methods of models A lot can happen behind the scenes when instantiating models. Even if a field doesn't have a default value provided, the database could have triggers implemented that would provide one. This is why, by default, this configuration attribute defaults to `True`. If set to `False`, `django-autotyping` will try its best to determine required fields, namely by checking if: - the field can be [`null`][django.db.models.Field.null] - the field has a default or a database default value set - the field is a subclass of [`DateField`][django.db.models.DateField] and has [`auto_now`][django.db.models.DateField.auto_now] or [`auto_now_add`][django.db.models.DateField.auto_now_add] set to `True`. Affected rules: `DJAS002`. """ ALLOW_REVERSE_ARGS: bool = False """Whether type checking should be added to the `args` argument of [`reverse`][django.urls.reverse]. By default, this is set to `False` to avoid having too many overloads being generated. Moreover, only tuples can be type checked, and most people are using lists for this argument. Instead, it is recommended to use the `kwargs` argument. Affected rules: `DJAS011`. """ @dataclass class AutotypingSettings: """A class holding the django-autotyping configuration.""" IGNORE: list[RulesT] = field(default_factory=list) """A list of ignored rules.""" STUBS_GENERATION: StubsGenerationSettings = field(default_factory=StubsGenerationSettings) """Stub related settings.""" CODE_GENERATION: CodeGenerationSettings = field(default_factory=CodeGenerationSettings) """Code generation related settings.""" @classmethod
def from_django_settings(cls, settings: LazySettings) -> Self:
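The classmethod that follows reads overrides from Django's LazySettings and fills the dataclass, falling back to field defaults. A small sketch of the alternate-constructor idiom against a plain mapping (the AUTOTYPING key name below is an assumption for illustration):

from dataclasses import dataclass, field

@dataclass
class AutotypingSettings:
    IGNORE: list = field(default_factory=list)

    @classmethod
    def from_mapping(cls, settings: dict) -> "AutotypingSettings":
        overrides = settings.get("AUTOTYPING", {})  # assumed settings key
        return cls(IGNORE=list(overrides.get("IGNORE", [])))

cfg = AutotypingSettings.from_mapping({"AUTOTYPING": {"IGNORE": ["DJA001"]}})
assert cfg.IGNORE == ["DJA001"]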
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: IBM/oper8 # Path: oper8/setup_vcs.py DEFAULT_DEST = "oper8_vcs" # Path: oper8/setup_vcs.py DEFAULT_TAG_EXPR = r"[0-9]+\.[0-9]+\.[0-9]+" # Path: oper8/setup_vcs.py def setup_vcs( source: str, destination: Optional[str] = None, branch_expr: Optional[List[str]] = None, tag_expr: Optional[List[str]] = __UNSET__, force: bool = False, ): """This utility will initialize an operator's VCS directory for use with oper8's VCS versioning. Args: source (str): The path to the source repository on disk destination (Optional[str]): The path where the VCS repo should be created branch_expr (Optional[List[str]]): Regular expression(s) to use to identify branches to retain in the VCS repo tag_expr (Optional[List[str]]): Regular expression(s) to use to identify tags to retain in the VCS repo force (bool): Force overwrite existing destination """ initializer = VCSRepoInitializer( source=source, destination=destination or DEFAULT_DEST, force=force ) initializer.initialize_branches( branch_expr=branch_expr, tag_expr=tag_expr if tag_expr is not __UNSET__ else [DEFAULT_TAG_EXPR], ) initializer.clean_up() # Path: oper8/cmd/base.py class CmdBase(abc.ABC): __doc__ = __doc__ @abc.abstractmethod def add_subparser( self, subparsers: argparse._SubParsersAction, ) -> argparse.ArgumentParser: """Add this command's argument parser subcommand Args: subparsers (argparse._SubParsersAction): The subparser section for the central main parser Returns: subparser (argparse.ArgumentParser): The configured parser for this command """ @abc.abstractmethod def cmd(self, args: argparse.Namespace): """Execute the command with the parsed arguments Args: args (argparse.Namespace): The parsed command line arguments """ # Path: oper8/cmd/setup_vcs_cmd.py import argparse import alog from ..setup_vcs import DEFAULT_DEST, DEFAULT_TAG_EXPR, setup_vcs from .base import CmdBase """ CLI command for setting up a VCS version repo """ # Standard # First Party # Local log = alog.use_channel("CMD-VCS") class SetupVCSCmd(CmdBase): __doc__ = __doc__ def add_subparser( self, subparsers: argparse._SubParsersAction, ) -> argparse.ArgumentParser: """Add the subparser for this command""" parser = subparsers.add_parser( "setup-vcs", help="Initialize a clean git repo to use with VCS versioning", ) command_args = parser.add_argument_group("Command Arguments") command_args.add_argument( "--source", "-s", required=True, help="Source repo to seed the clean git history", ) command_args.add_argument( "--destination", "-d",
default=DEFAULT_DEST,
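The predicted line wires the imported DEFAULT_DEST constant in as the flag's default, which is exactly why the import at the top of the file pulls it from setup_vcs. The same shape in isolation:

import argparse

DEFAULT_DEST = "oper8_vcs"

parser = argparse.ArgumentParser()
parser.add_argument(
    "--destination", "-d",
    default=DEFAULT_DEST,
    help="The path where the VCS repo should be created",
)
args = parser.parse_args([])
assert args.destination == "oper8_vcs"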
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ariebovenberg/whenever # Path: tests/common.py class AlwaysEqual: def __eq__(self, other): return True # Path: tests/common.py class AlwaysLarger: def __lt__(self, other): return False def __le__(self, other): return False def __gt__(self, other): return True def __ge__(self, other): return True # Path: tests/common.py class AlwaysSmaller: def __lt__(self, other): return True def __le__(self, other): return True def __gt__(self, other): return False def __ge__(self, other): return False # Path: tests/common.py class NeverEqual: def __eq__(self, other): return False # Path: tests/common.py @contextmanager def local_ams_tz(): with patch.dict(os.environ, {"TZ": "Europe/Amsterdam"}): tzset() yield # Path: tests/test_naive_datetime.py import pickle import weakref import pytest from datetime import datetime as py_datetime from datetime import timedelta, timezone from hypothesis import given from hypothesis.strategies import text from whenever import InvalidFormat, NaiveDateTime from .common import ( AlwaysEqual, AlwaysLarger, AlwaysSmaller, NeverEqual, local_ams_tz, ) def test_minimal(): d = NaiveDateTime(2020, 8, 15, 5, 12, 30, 450) assert d.year == 2020 assert d.month == 8 assert d.day == 15 assert d.hour == 5 assert d.minute == 12 assert d.second == 30 assert d.microsecond == 450 assert ( NaiveDateTime(2020, 8, 15, 12) == NaiveDateTime(2020, 8, 15, 12, 0) == NaiveDateTime(2020, 8, 15, 12, 0, 0) == NaiveDateTime(2020, 8, 15, 12, 0, 0, 0) ) def test_immutable(): d = NaiveDateTime(2020, 8, 15) with pytest.raises(AttributeError): d.year = 2021 # type: ignore[misc] class TestFromCanonicalStr: def test_valid(self): assert NaiveDateTime.from_canonical_str( "2020-08-15T12:08:30" ) == NaiveDateTime(2020, 8, 15, 12, 8, 30) def test_valid_three_fractions(self): assert NaiveDateTime.from_canonical_str( "2020-08-15T12:08:30.349" ) == NaiveDateTime(2020, 8, 15, 12, 8, 30, 349_000) def test_valid_six_fractions(self): assert NaiveDateTime.from_canonical_str( "2020-08-15T12:08:30.349123" ) == NaiveDateTime(2020, 8, 15, 12, 8, 30, 349_123) def test_single_space_instead_of_T(self): assert NaiveDateTime.from_canonical_str( "2020-08-15 12:08:30" ) == NaiveDateTime(2020, 8, 15, 12, 8, 30) def test_unpadded(self): with pytest.raises(InvalidFormat): NaiveDateTime.from_canonical_str("2020-8-15T12:8:30") def test_overly_precise_fraction(self): with pytest.raises(InvalidFormat): NaiveDateTime.from_canonical_str( "2020-08-15T12:08:30.123456789123" ) def test_trailing_z(self): with pytest.raises(InvalidFormat): NaiveDateTime.from_canonical_str("2020-08-15T12:08:30Z") def test_no_seconds(self): with pytest.raises(InvalidFormat): NaiveDateTime.from_canonical_str("2020-08-15T12:08") def test_empty(self): with pytest.raises(InvalidFormat): NaiveDateTime.from_canonical_str("") def test_garbage(self): with pytest.raises(InvalidFormat): NaiveDateTime.from_canonical_str("garbage") @given(text()) def test_fuzzing(self, s: str): with pytest.raises(InvalidFormat): NaiveDateTime.from_canonical_str(s) def test_equality(): d = NaiveDateTime(2020, 8, 15) different = NaiveDateTime(2020, 8, 16) same = NaiveDateTime(2020, 8, 15) assert d == same assert d != different assert not d == different assert not d != same assert hash(d) == hash(same) assert hash(d) != hash(different)
assert d == AlwaysEqual()
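The test then leans on the AlwaysEqual helper imported above: Python's rich-comparison fallback makes d == AlwaysEqual() true whenever d's own __eq__ returns NotImplemented for foreign types. A tiny sketch of that reflected-equality mechanic:

class AlwaysEqual:
    def __eq__(self, other):
        return True

class Date:
    def __eq__(self, other):
        if not isinstance(other, Date):
            return NotImplemented  # hands control to the other operand
        return True

assert Date() == AlwaysEqual()  # resolved by AlwaysEqual.__eq__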
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: DataWizual/Raycasting # Path: ray_casting.py def ray_casting(sc, player_pos, player_angle): ox, oy = player_pos xm, ym = mapping(ox, oy) cur_angle = player_angle - HALF_FOV for ray in range(NUM_RAYS): sin_a = math.sin(cur_angle) cos_a = math.cos(cur_angle) sin_a = sin_a if sin_a else 0.000001 cos_a = cos_a if cos_a else 0.000001 # verticals x, dx = (xm + TILE, 1) if cos_a >= 0 else (xm, -1) for i in range(0, WIDTH, TILE): depth_v = (x - ox) / cos_a y = oy + depth_v * sin_a if mapping(x + dx, y) in world_map: break x += dx * TILE # horizontals y, dy = (ym + TILE, 1) if sin_a >= 0 else (ym, -1) for i in range(0, HEIGHT, TILE): depth_h = (y - oy) / sin_a x = ox + depth_h * cos_a if mapping(x, y + dy) in world_map: break y += dy * TILE # projection depth = depth_v if depth_v < depth_h else depth_h depth *= math.cos(player_angle - cur_angle) proj_height = PROJ_COEFF / depth c = 255 / (1 + depth * depth * 0.00002) color = (63 + c // 2, 63 + c // 2, 63 + c // 2) pygame.draw.rect(sc, color, (ray * SCALE, HALF_HEIGHT - proj_height // 2, SCALE, proj_height)) cur_angle += DELTA_ANGLE # Path: map.py # Path: drawing.py import pygame from settings import * from ray_casting import ray_casting from map import mini_map class Drawing: def __init__(self, sc, sc_map): self.sc = sc self.sc_map = sc_map self.font = pygame.font.SysFont('Arial', 36, bold=True) def background(self): pygame.draw.rect(self.sc, SKYBLUE, (0, 0, WIDTH, HALF_HEIGHT)) pygame.draw.rect(self.sc, DARKGREY, (0, HALF_HEIGHT, WIDTH, HALF_HEIGHT)) def world(self, player_pos, player_angle): ray_casting(self.sc, player_pos, player_angle) def fps(self, clock): display_fps = str(int(clock.get_fps())) render = self.font.render(display_fps, 0, CRED) self.sc.blit(render, FPS_POS)
def mini_map(self, player):
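mini_map typically scales the player's world coordinates down onto the small map surface. The scaling arithmetic, shown without pygame so it runs stand-alone (MAP_SCALE is an assumed constant standing in for whatever settings.py defines):

MAP_SCALE = 5  # assumed world-to-minimap ratio

def mini_map_pos(player_x: float, player_y: float) -> tuple:
    # Integer-divide world coordinates down to minimap pixels
    return int(player_x // MAP_SCALE), int(player_y // MAP_SCALE)

assert mini_map_pos(500, 300) == (100, 60)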
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: CV-Reimplementation/Ucolor-Reimplementation # Path: config/config.py class Config(object): r""" A collection of all the required configuration parameters. This class is a nested dict-like structure, with nested keys accessible as attributes. It contains sensible default values for all the parameters, which may be overriden by (first) through a YAML file and (second) through a list of attributes and values. Extended Summary ---------------- This class definition contains default values corresponding to ``joint_training`` phase, as it is the final training phase and uses almost all the configuration parameters. Modification of any parameter after instantiating this class is not possible, so you must override required parameter values in either through ``config_yaml`` file or ``config_override`` list. Parameters ---------- config_yaml: str Path to a YAML file containing configuration parameters to override. config_override: List[Any], optional (default= []) A list of sequential attributes and values of parameters to override. This happens after overriding from YAML file. Examples -------- Let a YAML file named "config.yaml" specify these parameters to override:: ALPHA: 1000.0 BETA: 0.5 >>> _C = Config("config.yaml", ["OPTIM.BATCH_SIZE", 2048, "BETA", 0.7]) >>> _C.ALPHA # default: 100.0 1000.0 >>> _C.BATCH_SIZE # default: 256 2048 >>> _C.BETA # default: 0.1 0.7 Attributes ---------- """ def __init__(self, config_yaml: str, config_override: List[Any] = []): self._C = CN() self._C.GPU = [0] self._C.VERBOSE = False self._C.MODEL = CN() self._C.MODEL.SESSION = 'LUT' self._C.MODEL.INPUT = 'input' self._C.MODEL.TARGET = 'target' self._C.OPTIM = CN() self._C.OPTIM.BATCH_SIZE = 1 self._C.OPTIM.SEED = 3407 self._C.OPTIM.NUM_EPOCHS = 100 self._C.OPTIM.NEPOCH_DECAY = [100] self._C.OPTIM.LR_INITIAL = 0.0002 self._C.OPTIM.LR_MIN = 0.0002 self._C.OPTIM.BETA1 = 0.5 self._C.OPTIM.WANDB = False self._C.TRAINING = CN() self._C.TRAINING.VAL_AFTER_EVERY = 3 self._C.TRAINING.RESUME = False self._C.TRAINING.TRAIN_DIR = '../dataset/Jung/train' self._C.TRAINING.VAL_DIR = '../dataset/Jung/test' self._C.TRAINING.SAVE_DIR = 'checkpoints' self._C.TRAINING.PS_W = 512 self._C.TRAINING.PS_H = 512 self._C.TRAINING.ORI = False self._C.TESTING = CN() self._C.TESTING.WEIGHT = None self._C.TESTING.SAVE_IMAGES = False # Override parameter values from YAML file first, then from override list. self._C.merge_from_file(config_yaml) self._C.merge_from_list(config_override) # Make an instantiated object of this class immutable. self._C.freeze() def dump(self, file_path: str): r"""Save config at the specified file path. Parameters ---------- file_path: str (YAML) path to save config at. """ self._C.dump(stream=open(file_path, "w")) def __getattr__(self, attr: str): return self._C.__getattr__(attr) def __repr__(self): return self._C.__repr__() # Path: data/data_RGB.py def get_training_data(rgb_dir, inp, target, img_options): assert os.path.exists(rgb_dir) return DataLoaderTrain(rgb_dir, inp, target, img_options) # Path: data/data_RGB.py def get_validation_data(rgb_dir, inp, target, img_options): assert os.path.exists(rgb_dir) return DataLoaderVal(rgb_dir, inp, target, img_options) # Path: train.py import warnings import torch.optim as optim from accelerate import Accelerator from pytorch_msssim import SSIM from torch.utils.data import DataLoader from torchmetrics.functional import peak_signal_noise_ratio, structural_similarity_index_measure from tqdm import tqdm from config import Config from data import get_training_data, get_validation_data from models import * from utils import * warnings.filterwarnings('ignore') opt = Config('config.yml') seed_everything(opt.OPTIM.SEED) def train(): # Accelerate accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator() device = accelerator.device config = { "dataset": opt.TRAINING.TRAIN_DIR } accelerator.init_trackers("shadow", config=config) if accelerator.is_local_main_process: os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True) # Data Loader train_dir = opt.TRAINING.TRAIN_DIR val_dir = opt.TRAINING.VAL_DIR train_dataset = get_training_data(train_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, {'w': opt.TRAINING.PS_W, 'h': opt.TRAINING.PS_H}) train_loader = DataLoader(dataset=train_dataset, batch_size=opt.OPTIM.BATCH_SIZE, shuffle=True, num_workers=16, drop_last=False, pin_memory=True)
val_dataset = get_validation_data(val_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, {'w': opt.TRAINING.PS_W, 'h': opt.TRAINING.PS_H, 'ori': opt.TRAINING.ORI})
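The validation dataset is built with the same factory signature as the training one and then wrapped in a non-shuffling loader. A reduced sketch of the symmetric setup with a toy tensor dataset in place of the image folders:

import torch
from torch.utils.data import DataLoader, TensorDataset

def get_split(n):  # stand-in for get_training_data / get_validation_data
    return TensorDataset(torch.zeros(n, 3, 8, 8), torch.zeros(n, 3, 8, 8))

train_loader = DataLoader(get_split(8), batch_size=2, shuffle=True)
val_loader = DataLoader(get_split(4), batch_size=1, shuffle=False)
assert len(val_loader) == 4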
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ottuco/multi-api-mocker # Path: multi_api_mocker/utils.py def group_by_url(api_mocks: List[MockAPIResponse]) -> List[MockConfiguration]: """ Organizes a list of MockAPIResponse objects by their URL and method, grouping them into lists of responses for each endpoint. This grouping is necessary for requests-mock when multiple responses for the same endpoint are required, as it allows requests-mock to cycle through the responses in order for each subsequent call to the same URL. Parameters: api_mocks (List[MockConfiguration]): A list of MockAPIResponse objects representing the expected responses for different API calls. Returns: List[MockConfiguration]: A list of MockConfiguration objects where each object contains the URL, method, and a list of responses to be used by requests-mock to simulate API interactions. """ grouped_mocks = defaultdict(list) for mock in api_mocks: # Create an instance of ResponseKwargs response_kwargs = ResponseKwargs( text=mock.text if not mock.exc else None, status_code=mock.status_code if not mock.exc else None, json=mock.json if not mock.exc else None, exc=mock.exc if mock.exc else None, ) # Add the ResponseKwargs instance, not the dict grouped_mocks[(mock.url, mock.method)].append(response_kwargs) output = [] for (url, method), kwargs_list in grouped_mocks.items(): # Convert each ResponseKwargs instance to a dict responses = [kwargs.to_dict() for kwargs in kwargs_list] config = MockConfiguration(url=url, method=method.upper(), responses=responses) output.append(config) return output # Path: multi_api_mocker/utils.py class MockSet: """ A collection class that manages MockAPIResponse objects and integrates with the requests_mock fixture. This class provides efficient access and iteration over grouped API responses by their endpoint names, simplifying the process of setting up and managing multiple mock responses in tests. It also stores and allows access to the requests_mock adapter's _Matcher objects associated with each mock response, enabling advanced interactions and assertions in tests. Parameters: api_responses (List[MockAPIResponse]): A list of MockAPIResponse objects, each representing a specific API response. requests_mock (Mocker): The requests_mock fixture instance used for registering the mock API responses. matchers (Dict[str, _Matcher]): A dictionary mapping endpoint names to their respective requests_mock adapter _Matcher objects. Attributes: _response_registry (Dict[str, MockAPIResponse]): A dictionary mapping endpoint names to their respective MockAPIResponse objects. requests_mock (Optional[Mocker]): The requests_mock fixture instance. matchers (Dict[str, _Matcher]): A dictionary of _Matcher objects, providing detailed control and inspection capabilities for the registered mock API responses. Methods: get_matcher(endpoint_name: str) -> _Matcher: Returns the _Matcher object associated with the given endpoint name. """ def __init__( self, api_responses: List[MockAPIResponse], requests_mock: Mocker = None, matchers: Dict[str, _Matcher] = None, ): self._response_registry = { response.endpoint_name: response for response in api_responses } self.requests_mock = requests_mock self.matchers = matchers or {} def __getitem__(self, endpoint_name: str) -> MockAPIResponse: return self._response_registry[endpoint_name] def __iter__(self): return iter(self._response_registry.values()) def __len__(self): return len(self._response_registry) def __repr__(self): endpoint_names = ", ".join(self._response_registry.keys()) return f"<{self.__class__.__name__} with endpoints: {endpoint_names}>" def get_matcher(self, endpoint_name: str) -> _Matcher: return self.matchers.get(endpoint_name) # Path: multi_api_mocker/contrib/pytest_plugin.py import pytest from requests_mock import Mocker from ..utils import group_by_url, MockSet @pytest.fixture(scope="function") def setup_api_mocks(requests_mock: Mocker, request) -> MockSet: """ A pytest fixture for configuring mock API responses in a test environment. It takes subclasses of MockAPIResponse, each representing a unique API call configuration. These subclasses facilitate the creation of simple or complex response flows, simulating real-world API interactions. Parameters: requests_mock (Mocker): The pytest requests_mock fixture. request: The pytest request object containing parametrized test data. Returns: MockSet: An instance of MockSet containing the organized MockAPIResponse objects, ready for use in tests. The fixture supports multiple test scenarios, allowing for thorough testing of varying API response conditions. This is especially useful for simulating sequences of API calls like Fork, Commit, and Push in a version control system context. Example Usage: - Single API Call Test: @pytest.mark.parametrize("setup_api_mocks", [([Fork()])], indirect=True) - Multi-call Sequence Test: @pytest.mark.parametrize( "setup_api_mocks", [([Fork(), Commit(), Push()])], indirect=True ) - Testing Multiple Scenarios: @pytest.mark.parametrize( "setup_api_mocks", [([Fork(), Commit(), Push()]), ([Fork(), Commit(), ForcePush()])], indirect=True ) This fixture converts the list of MockAPIResponse subclasses into MockConfiguration instances, registers them with requests_mock, and returns a MockSet object, which allows querying each mock by its endpoint name. """ # Convert the incoming parameter to a list of MockConfiguration instances
api_mocks_configurations = group_by_url(request.param)
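group_by_url buckets the parametrized responses by (url, method) so requests-mock can replay them in order on repeated calls to the same endpoint. The grouping step in isolation, with plain tuples standing in for MockAPIResponse objects:

from collections import defaultdict

mocks = [
    ("https://api.test/x", "GET", {"status_code": 200}),
    ("https://api.test/x", "GET", {"status_code": 500}),
]
grouped = defaultdict(list)
for url, method, kwargs in mocks:
    grouped[(url, method.upper())].append(kwargs)

assert len(grouped[("https://api.test/x", "GET")]) == 2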
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Jisencc/yolov5_dual_weighting # Path: utils/augmentations.py def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates # Path: utils/general.py def resample_segments(segments, n=1000): # Up-sample an (n,2) segment for i, s in enumerate(segments): s = np.concatenate((s, s[0:1, :]), axis=0) x = np.linspace(0, len(s) - 1, n) xp = np.arange(len(s)) segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy return segments # Path: utils/general.py def segment2box(segment, width=640, height=640): # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) x, y, = x[inside], y[inside] return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy # Path: utils/segment/augmentations.py import math import random import cv2 import numpy as np from ..augmentations import box_candidates from ..general import resample_segments, segment2box # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Image augmentation functions """ def mixup(im, labels, segments, im2, labels2, segments2): # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 im = (im * r + im2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) segments = np.concatenate((segments, segments2), 0) return im, labels, segments def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 # Center C = np.eye(3) C[0, 2] = -im.shape[1] / 2 # x translation (pixels) C[1, 2] = -im.shape[0] / 2 # y translation (pixels) # Perspective P = np.eye(3) P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) # Rotation and Scale R = np.eye(3) a = random.uniform(-degrees, degrees) # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations s = random.uniform(1 - scale, 1 + scale) # s = 2 ** random.uniform(-scale, scale) R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) # Shear S = np.eye(3) S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) # Translation T = np.eye(3) T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) # Combined rotation matrix M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed if perspective: im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) else: # affine im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) # Visualize # import matplotlib.pyplot as plt # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() # ax[0].imshow(im[:, :, ::-1]) # base # ax[1].imshow(im2[:, :, ::-1]) # warped # Transform label coordinates n = len(targets) new_segments = [] if n: new = np.zeros((n, 4))
segments = resample_segments(segments) # upsample
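Before warping, each polygon is densified so the affine transform can be applied point-wise; resample_segments closes the ring and linearly interpolates to a fixed point count. The same operation for a single segment, as a stand-alone sketch:

import numpy as np

def resample_segment(s: np.ndarray, n: int = 1000) -> np.ndarray:
    s = np.concatenate((s, s[0:1, :]), axis=0)  # close the polygon
    x = np.linspace(0, len(s) - 1, n)
    xp = np.arange(len(s))
    # interpolate x and y columns independently, then re-pair them
    return np.stack([np.interp(x, xp, s[:, i]) for i in range(2)], axis=1)

seg = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
assert resample_segment(seg, 100).shape == (100, 2)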