prompt
stringlengths 1.74k
34.3k
| ref
stringlengths 4
432
|
---|---|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zju3dv/nr_in_a_room
# Path: models/perceptual_model.py
class VGG16_for_Perceptual(nn.Module):
class CLIP_for_Perceptual(nn.Module):
def __init__(self, requires_grad=False, n_layers=[2, 4, 14, 21]):
def forward(self, x):
def perceptual_loss(
self,
pred: torch.Tensor,
gt: torch.Tensor,
low_level: bool = True,
):
def mse_loss(source, target):
def __init__(self, requires_grad=False, model_name="ViT-B/32"):
def perceptual_loss(self, pred: torch.Tensor, gt: torch.Tensor, **kwargs):
def mse_loss(source, target):
def sim_loss(source, target):
def compute_img_embedding(self, img: torch.Tensor):
def get_perceptual_loss(
perceptual_net: Union[VGG16_for_Perceptual, CLIP_for_Perceptual],
pred: torch.Tensor,
gt: torch.Tensor,
low_level: bool = True,
):
# Path: models/perceptual_model.py
def get_perceptual_loss(
perceptual_net: Union[VGG16_for_Perceptual, CLIP_for_Perceptual],
pred: torch.Tensor,
gt: torch.Tensor,
low_level: bool = True,
):
"""
perceptual loss is suitable for whole images, not sampled rays.
pred: [B, 3, H, W]
gt: [B, 3, H, W]
"""
assert pred.shape == gt.shape
if pred.shape[2:4] != torch.Size((244, 244)):
norm = Normalize(
(0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
)
# pred = norm(nn.Upsample((244, 244), mode="bilinear", align_corners=True)(pred))
# gt = norm(nn.Upsample((244, 244), mode="bilinear", align_corners=True)(gt))
pred = norm(pred)
gt = norm(gt)
return perceptual_net.perceptual_loss(pred, gt, low_level=low_level)
# Path: models/perceptual_model.py
class VGG16_for_Perceptual(nn.Module):
def __init__(self, requires_grad=False, n_layers=[2, 4, 14, 21]):
super(VGG16_for_Perceptual, self).__init__()
from torchvision import models
vgg_pretrained_features = models.vgg16(
pretrained=True
).features # TODO: check requires_grad
self.slice0 = nn.Sequential()
self.slice1 = nn.Sequential()
self.slice2 = nn.Sequential()
self.slice3 = nn.Sequential()
for x in range(n_layers[0]): # relu1_1
self.slice0.add_module(str(x), vgg_pretrained_features[x])
for x in range(n_layers[0], n_layers[1]): # relu1_2
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(n_layers[1], n_layers[2]): # relu3_2
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(n_layers[2], n_layers[3]): # relu4_2
self.slice3.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
# TODO: normalize
h0 = self.slice0(x)
h1 = self.slice1(h0)
h2 = self.slice2(h1)
h3 = self.slice3(h2)
return h0, h1, h2, h3
def perceptual_loss(
self,
pred: torch.Tensor,
gt: torch.Tensor,
low_level: bool = True,
):
def mse_loss(source, target):
return torch.mean((source - target) ** 2)
perceptual_loss = 0
if low_level:
pred_0 = self.slice0(pred)
gt_0 = self.slice0(gt)
perceptual_loss += mse_loss(pred_0, gt_0)
else:
pred_0, pred_1, pred_2, pred_3 = self.forward(pred)
gt_0, gt_1, gt_2, gt_3 = self.forward(gt)
perceptual_loss += mse_loss(pred_0, gt_0)
perceptual_loss += mse_loss(pred_1, gt_1)
perceptual_loss += mse_loss(pred_2, gt_2)
perceptual_loss += mse_loss(pred_3, gt_3)
return perceptual_loss
# Path: optim/patch_perceptual.py
import torch
import numpy as np
import cv2
from models import perceptual_model
from models.perceptual_model import get_perceptual_loss, VGG16_for_Perceptual
from typing import List, Optional, Any, Dict, Union
# import lpips
# loss_fn_vgg = lpips.LPIPS(net="vgg").cuda()
def get_mask_bbox(mask):
# crop image
true_indices = np.nonzero(mask)
min_h, min_w = np.min(true_indices[0]), np.min(true_indices[1])
max_h, max_w = np.max(true_indices[0]), np.max(true_indices[1])
# print(min_h, min_w)
# print(max_h, max_w)
# img = img[min_h:max_h+1,min_w:max_w+1,:]
return min_h, max_h, min_w, max_w
def patch_perceptual_loss(
| perceptual_net: VGG16_for_Perceptual, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ShramanPramanick/VoLTA
# Path: Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py
def make_roi_mask_feature_extractor(cfg):
func = _ROI_MASK_FEATURE_EXTRACTORS[cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR]
return func(cfg)
# Path: Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py
def make_roi_mask_predictor(cfg):
func = _ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
return func(cfg)
# Path: Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py
def make_roi_mask_post_processor(cfg):
if cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS:
mask_threshold = cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD
masker = Masker(threshold=mask_threshold, padding=1)
else:
masker = None
mdetr_style_aggregate_class_num = cfg.TEST.MDETR_STYLE_AGGREGATE_CLASS_NUM
mask_post_processor = MaskPostProcessor(
masker, mdetr_style_aggregate_class_num, vl_version=cfg.MODEL.ROI_MASK_HEAD.PREDICTOR.startswith("VL")
)
return mask_post_processor
# Path: Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py
def make_roi_mask_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
loss_evaluator = MaskRCNNLossComputation(
matcher, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION, vl_version=cfg.MODEL.ROI_MASK_HEAD.PREDICTOR.startswith("VL")
)
return loss_evaluator
# Path: Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from .inference import make_roi_mask_post_processor
from .loss import make_roi_mask_loss_evaluator
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
def keep_only_positive_boxes(boxes):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList)
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
num_boxes = 0
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
positive_boxes.append(boxes_per_image[inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds
class ROIMaskHead(torch.nn.Module):
def __init__(self, cfg):
super(ROIMaskHead, self).__init__()
self.cfg = cfg.clone()
self.feature_extractor = make_roi_mask_feature_extractor(cfg)
self.predictor = make_roi_mask_predictor(cfg)
| self.post_processor = make_roi_mask_post_processor(cfg) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: earthcube-lab/textnoisr
# Path: textnoisr/noise.py
class CharNoiseAugmenter:
_AVAILABLE_ACTIONS = ("insert", "swap", "substitute", "delete")
def __init__(
self,
noise_level: float,
actions: tuple[str, ...] = _AVAILABLE_ACTIONS,
character_set: tuple[str, ...] = tuple(string.ascii_letters),
seed: int | None = None,
natural_language_swap_correction: float = 1.052,
) -> None:
def _random_success(self, p: float) -> bool:
def _random_char(self, p: float, character_set: tuple[str, ...]) -> str:
def insert_random_chars(self, text: str, p: float) -> str:
def _choose_another_character(self, char):
def substitute_random_chars(self, text: str, p: float) -> str:
def delete_random_chars(self, text: str, p: float) -> str:
def consecutive_swap_random_chars(self, text: str, p: float) -> str:
def add_noise(self, text: str | list[str]) -> str | list[str]:
# Path: textnoisr/noise_dataset.py
def _add_noise_to_example(
example: dict,
noise_augmenter: noise.CharNoiseAugmenter,
feature_name: str,
) -> dict:
def add_noise(
dataset: Dataset,
noise_augmenter: noise.CharNoiseAugmenter,
feature_name: str = "tokens",
**kwargs: Any,
) -> Dataset:
# Path: tests/textnoisr/test_noise_dataset.py
from math import isclose
from datasets import load_dataset as hf_load_dataset
from evaluate import load
from textnoisr import noise, noise_dataset
import pytest
ABS_TOLERANCE = 1.5e-2
REL_TOLERANCE = 1.5e-2
@pytest.fixture()
def dataset100_text():
return hf_load_dataset("rotten_tomatoes", split="train")
@pytest.fixture()
def dataset100(dataset100_text):
def split_tokens(item):
item["tokens"] = item["text"].split(" ")
return item
return dataset100_text.map(split_tokens)
cer = load("cer")
@pytest.mark.nightly
@pytest.mark.parametrize(
"noise_level,actions",
[
(0.001, ["substitute"]),
(0.001, ["insert"]),
(0.001, ["delete"]),
(0.001, ["swap"]),
(0.001, ["delete", "insert", "substitute", "swap"]),
(0.01, ["substitute"]),
(0.01, ["insert"]),
(0.01, ["delete"]),
(0.01, ["swap"]),
(0.01, ["delete", "insert", "substitute", "swap"]),
(0.1, ["substitute"]),
(0.1, ["insert"]),
(0.1, ["delete"]),
(0.1, ["swap"]),
(0.1, ["delete", "insert", "substitute", "swap"]),
(0.15, ["substitute"]),
(0.15, ["insert"]),
(0.15, ["delete"]),
(0.15, ["swap"]),
(0.15, ["delete", "insert", "substitute", "swap"]),
(0.20, ["substitute"]),
(0.20, ["insert"]),
(0.20, ["delete"]),
(0.20, ["swap"]),
(0.20, ["delete", "insert", "substitute", "swap"]),
],
)
@pytest.mark.filterwarnings("ignore:jiwer.compute_measures")
def test_add_noise_on_split_into_words(dataset100, noise_level, actions):
noised_dataset = noise_dataset.add_noise(
dataset100,
| noise.CharNoiseAugmenter(noise_level=noise_level, actions=actions, seed=42), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: oven-lab/tuya_cloud_map_extractor
# Path: custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py
class ServerError(Exception):
pass
# Path: custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py
class ClientIDError(Exception):
pass
# Path: custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py
class ClientSecretError(Exception):
pass
# Path: custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py
class DeviceIDError(Exception):
pass
# Path: custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/tuya.py
import datetime
import hmac
import requests
from .const import ServerError, ClientIDError, ClientSecretError, DeviceIDError
def _get_sign(client_id: str, secret_key: str, url: str, t: int, token: str):
empty_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
signstr = client_id + token + t + "GET" + "\n" + empty_hash + "\n" + "" + "\n" + url
return hmac.new(
secret_key.encode(), msg=signstr.encode(), digestmod="sha256"
).hexdigest()
def tuyarequest(
server: str, url: str, client_id: str, secret_key: str, token=""
) -> dict:
"""Handles authentication with provided token and makes request to tuya servers."""
t = str(int(round(datetime.datetime.timestamp(datetime.datetime.now()) * 1000, 0)))
sign = _get_sign(
client_id=client_id, secret_key=secret_key, url=url, t=t, token=token
)
headers = {
"sign_method": "HMAC-SHA256",
"client_id": client_id,
"t": t,
"sign": sign.upper(),
}
if token != "":
headers["access_token"] = token
return requests.get(url=server + url, headers=headers, timeout=2.5).json()
def get_download_link(
server: str, client_id: str, secret_key: str, device_id: str
) -> str:
"""Gets the download link of the real time map."""
url = "/v1.0/token?grant_type=1"
response = tuyarequest(
server=server, url=url, client_id=client_id, secret_key=secret_key
)
if not response["success"]:
if response["msg"] == "clientId is invalid":
raise ClientIDError("Invalid Client ID")
elif response["msg"] == "sign invalid":
raise ClientSecretError("Invalid Client Secret")
elif "cross-region access is not allowed" in response["msg"]:
raise ServerError("Wrong server region. Cross-region access is not allowed.")
else:
raise RuntimeError("Request failed - Response: ", response)
access_token = response["result"]["access_token"]
url = "/v1.0/users/sweepers/file/" + device_id + "/realtime-map"
response = tuyarequest(
server=server,
url=url,
client_id=client_id,
secret_key=secret_key,
token=access_token,
)
if not response["success"]:
if response["msg"] == "permission deny":
| raise DeviceIDError("Invalid Device ID") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mlbio-epfl/hume
# Path: argparser.py
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--phi1_path',
type=str,
required=True,
help="Path to the embeddings in first representation space")
parser.add_argument('--phi2_path',
type=str,
required=True,
help="Path to the embeddings in second representation space")
parser.add_argument('--phi1_path_val',
type=str,
help="Path to the embeddings in first representation space to compute metrics."
" If not provided phi1_path will be also used for evaluation.")
parser.add_argument('--phi2_path_val',
type=str,
help="Path to the embeddings in second representation space to compute metrics."
" If not provided phi2_path will be also used for evaluation.")
parser.add_argument('--gt_labels_path',
type=str,
required=True,
help="Path to ground truth labeling to compute metrics")
parser.add_argument('--k',
type=int,
default=10,
help="Number of classes")
parser.add_argument('--inner_lr',
type=float,
default=0.001,
help="Step size for the inner optimization")
parser.add_argument('--outer_lr',
type=float,
default=0.001,
help="Step size for the task encoder's updates")
parser.add_argument('--tau',
type=float,
default=0.1,
help="Temperature hyperparameter")
parser.add_argument('--H_reg',
type=float,
default=10.,
help="Entropy regularization coefficient")
parser.add_argument('--num_iters',
type=int,
default=1000,
help="Number of training iterations")
parser.add_argument('--adaptation_steps',
type=int,
default=300,
help="Number of inner iterations to fit linear model")
parser.add_argument('--num_subsets',
type=int,
default=20,
help="Number of (Xtr, Xte) subsets for averaging HUME's loss")
parser.add_argument('--subset_size',
type=int,
default=10000,
help="Size of union of each (Xtr, Xte) subset")
parser.add_argument('--train_fraction',
type=float,
default=0.9,
help="Fraction of args.subset_size to define size of Xtr")
parser.add_argument('--no_anneal',
dest='anneal',
action='store_false',
help="Turn off temperature and learning rate annealing")
parser.add_argument('--no_rand_init',
dest='rand_init',
action='store_false',
help="Start from random inner w0 at each outer iter or generate random w0 once")
parser.add_argument('--device',
type=str,
default="cuda",
help="Use cuda or cpu")
parser.add_argument('--exp_path',
type=str,
default="./linear_tasks/",
help="Path to save experiment's results")
parser.add_argument('--save_all',
action='store_true',
help="If used then task_encoder is saved at each iteration")
parser.add_argument('--seed',
type=int,
default=42,
help='Random seed')
return parser.parse_args(args)
# Path: activations.py
class Sparsemax(torch.nn.Module):
def __init__(self, dim=0):
self.dim = dim
super(Sparsemax, self).__init__()
def forward(self, input):
return sparsemax(input, self.dim)
# Path: utils.py
def fix_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Path: utils.py
def get_cv_score(X, y):
cv = KFold(n_splits=10, random_state=1, shuffle=True)
clf = LogisticRegression(penalty=None)
scores = cross_val_score(clf, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
return np.mean(scores)
# Path: utils.py
def check_both_none_or_not_none(arg1, arg2):
return (arg1 is None and arg2 is None) or (arg1 is not None and arg2 is not None)
# Path: metrics.py
def cluster_acc(y_pred, y_true, return_matched=False):
"""
Calculate clustering accuracy. Require scipy installed
# Arguments
y: true labels, numpy.array with shape `(n_samples,)`
y_pred: predicted labels, numpy.array with shape `(n_samples,)`
# Return
accuracy, in [0,1]
"""
y_true = y_true.astype(np.int64)
assert y_pred.size == y_true.size
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
row_ind, col_ind = linear_sum_assignment(w.max() - w)
if return_matched:
matched = np.array(list(map(lambda i: col_ind[i], y_pred)))
return w[row_ind, col_ind].sum() / y_pred.size, matched
else:
return w[row_ind, col_ind].sum() / y_pred.size
# Path: metrics.py
def cluster_ari(y_pred, y_true):
"""
Calculate adjusted rand index. Require scikit-learn installed
# Arguments
y: true labels, numpy.array with shape `(n_samples,)`
y_pred: predicted labels, numpy.array with shape `(n_samples,)`
# Return
ARI, in [0,1]
"""
return adjusted_rand_score(y_true, y_pred)
# Path: hume.py
import os
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import learn2learn as l2l
import numpy as np
from tqdm import tqdm
from argparser import parse_args
from activations import Sparsemax
from utils import fix_seed, get_cv_score, check_both_none_or_not_none
from metrics import cluster_acc, cluster_ari
def run(args=None):
args = parse_args(args)
device = torch.device(args.device)
fix_seed(args.seed)
if not os.path.exists(args.exp_path):
os.makedirs(args.exp_path)
phi1 = np.load(args.phi1_path).astype(np.float32)
phi2 = np.load(args.phi2_path).astype(np.float32)
| assert check_both_none_or_not_none(args.phi1_path_val, args.phi2_path_val) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: MaxDude132/django-register-field
# Path: django_register/base.py
class Register:
def __init__(self):
self._key_to_class = {}
self._class_to_key = {}
def register(self, klass, db_key=None):
if db_key is None:
try:
db_key = klass.label
except AttributeError:
raise ValueError(
_(
"The class {klass} does not have a label. Define "
"one or pass a db_key to be used as database value."
).format(klass=klass)
)
if db_key in self._key_to_class:
raise ValueError(_("Key {key} already registered.").format(key=db_key))
if klass in self._class_to_key:
raise ValueError(_("Class {klass} already registered.").format(klass=klass))
self._key_to_class[db_key] = klass
self._class_to_key[klass] = db_key
return klass
def from_key(self, value):
try:
return self._key_to_class[value]
except (KeyError, TypeError):
raise ValidationError(
_("Value {value} not a registered key.").format(value=value)
)
def from_class(self, value):
try:
return self._class_to_key[value]
except KeyError:
raise ValidationError(
_("Value {value} not a registered class.").format(value=value)
)
def get_key(self, value):
try:
self.from_key(value)
except ValidationError:
return self.from_class(value)
return value
def get_class(self, value):
try:
self.from_class(value)
except ValidationError:
return self.from_key(value)
return value
@property
def max_length(self):
if self._key_to_class:
return max(len(key) for key in self._key_to_class)
@property
def choices(self):
return [
(k, self._get_verbose_name(v, k)) for k, v in self._key_to_class.items()
]
@property
def flatchoices(self):
return [
(v, self._get_verbose_name(v, k)) for k, v in self._key_to_class.items()
]
def _get_verbose_name(self, klass, key):
return getattr(klass, "verbose_name", key.replace("_", " ").title())
def __iter__(self):
return iter(self._key_to_class.values())
# Path: django_register/base.py
class RegisterChoices(metaclass=RegisterChoicesMeta):
def __new__(cls, klass):
return cls.register.get_class(klass)
# Path: django_register/base.py
class RegisterField(models.CharField):
description = _("Store a string, return the associated class")
def __init__(self, *args, **kwargs):
if "register" not in kwargs and "choices" not in kwargs:
raise ValueError(_("You must provide choices to the RegisterField."))
if "register" not in kwargs and not hasattr(kwargs["choices"], "register"):
raise ValueError(_("Choices must be a RegisterChoices instance."))
# When building the migrations, the register cannot be in the choices.
# It will be passed individually, so we take it from there.
self.register: Register = (
kwargs.pop("register")
if "register" in kwargs
else kwargs["choices"].register
)
if "choices" not in kwargs:
kwargs["choices"] = self.register.choices
if "max_length" not in kwargs and (max_length := self.register.max_length):
kwargs["max_length"] = max_length
if "default" in kwargs:
try:
kwargs["default"] = self.register.get_key(kwargs["default"])
except ValidationError:
pass
super().__init__(*args, **kwargs)
def from_db_value(self, value, expression, connection):
if not value:
return value
return self.register.get_class(value)
def get_default(self):
default = super().get_default()
if default:
return self.register.get_class(default)
return default
def to_python(self, value):
if not value:
return value
return self.register.get_class(value)
def get_prep_value(self, value):
if not value:
return value
return self.register.get_key(value)
def value_from_object(self, obj):
value = super().value_from_object(obj)
return self.get_prep_value(value)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs.pop("choices", None)
kwargs["register"] = self.register
return name, path, args, kwargs
def clean(self, value, model_instance):
"""
We need to override clean because it runs the validations on the
Python object instead of on the database string.
"""
value = self.get_prep_value(value)
self.validate(value, model_instance)
self.run_validators(value)
return self.to_python(value)
def _get_flatchoices(self):
return self.register.flatchoices
flatchoices = property(_get_flatchoices)
def _register_choices(self):
return self.register.choices
def _register_choices_set(self, value):
return
choices = property(_register_choices, _register_choices_set)
_choices = property(_register_choices, _register_choices_set)
# Path: tests/models.py
from dataclasses import dataclass
from django.db import models
from django_register import Register, RegisterChoices, RegisterField
# Standard libraries
# Django
# django_register
@dataclass(unsafe_hash=True)
class CountryInfo:
population: int
capital: str
class CountryChoices(RegisterChoices):
CANADA = CountryInfo(population=37_742_154, capital="Ottawa")
FRANCE = CountryInfo(population=65_273_511, capital="Paris")
GERMANY = CountryInfo(population=83_783_942, capital="Berlin")
UNITED_STATES = CountryInfo(population=331_900_000, capital="Washington")
@dataclass(unsafe_hash=True)
class ContinentInfo:
label: str
@dataclass(unsafe_hash=True)
class FoodInfo:
verbose_name: str
| food_register = Register() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hsouri/bob-classification
# Path: medical_chexpert/util/custom_transforms.py
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
# Path: medical_chexpert/util/augment.py
def new_data_aug_generator(args=None, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
img_size = args.input_size
remove_random_resized_crop = args.src
# mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
primary_tfl = []
scale = (0.08, 1.0)
interpolation = 'bicubic'
if remove_random_resized_crop:
primary_tfl = [
transforms.Resize(img_size, interpolation=3),
transforms.RandomCrop(img_size, padding=4, padding_mode='reflect'),
transforms.RandomHorizontalFlip()
]
else:
primary_tfl = [
RandomResizedCropAndInterpolation(
img_size, scale=scale, interpolation=interpolation),
transforms.RandomHorizontalFlip()
]
secondary_tfl = [transforms.RandomChoice([gray_scale(p=1.0),
Solarization(p=1.0),
GaussianBlur(p=1.0)])]
# if args.color_jitter is not None and not args.color_jitter == 0:
# secondary_tfl.append(transforms.ColorJitter(args.color_jitter, args.color_jitter, args.color_jitter))
final_tfl = [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
]
return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
# Path: medical_chexpert/util/datasets.py
import os
import PIL
import torch
from torchvision import datasets, transforms
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from util.dataloader_med import RetinaDataset, Augmentation, Node21, ChestX_ray14, Covidx, CheXpert
from .custom_transforms import GaussianBlur
from .augment import new_data_aug_generator
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
print(dataset)
return dataset
def build_dataset_chest_xray(split, args):
is_train = (split == 'train')
# transform = build_transform(is_train, args)
if args.build_timm_transform:
transform = build_transform(is_train, args)
else:
if is_train:
if args.aug_strategy == 'simclr_with_randrotation':
print(args.aug_strategy)
transform = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomRotation(degrees=(0, 45)),
transforms.RandomGrayscale(p=0.2),
| transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Salz0/telegram_flea
# Path: models.py
class Message(BaseModel):
"""The model for the Telegram message."""
from_user: fields.ForeignKeyRelation[User] = fields.ForeignKeyField(
"bot.User", related_name="messages"
)
id = fields.IntField(pk=True, generated=True)
# In Telegram, `message_id` is unique only **within a chat**.
message_id = fields.BigIntField() # for the sake of safety, this is a `BigIntField`
# TODO: [3/20/2023 by Mykola] Make this a foreign key to the Chat model
chat_id = fields.BigIntField()
reply_to_message: fields.ForeignKeyRelation[Message] = fields.ForeignKeyField(
"bot.Message", related_name="replies", null=True
)
content_type = fields.TextField(null=True)
text = fields.TextField(null=True)
date = fields.DatetimeField()
is_handled = fields.BooleanField(default=False)
content = fields.BinaryField(null=True)
status = fields.CharField(max_length=32, null=True)
complete_message_json = fields.JSONField(null=True)
replies: fields.BackwardFKRelation[Message]
# Path: models.py
class User(BaseModel):
"""
The model for the Telegram user.
This model stores all the information about the user.
It is also used to store all the authentication-related information.
"""
id = fields.BigIntField(pk=True, generated=False)
username = fields.CharField(max_length=32, null=True)
first_name = fields.TextField(null=True)
last_name = fields.TextField(null=True)
phone_number = fields.CharField(max_length=14, null=True)
language_code = fields.CharField(max_length=2, null=True)
is_bot = fields.BooleanField(default=False)
start_payload = fields.TextField(null=True)
is_active = fields.BooleanField(default=True)
has_bot_blocked = fields.BooleanField(default=False)
is_beta = fields.BooleanField(default=False)
is_deleted = fields.BooleanField(default=False)
is_admin = fields.BooleanField(default=False)
is_staff_member = fields.BooleanField(default=False)
messages: fields.ReverseRelation[Message]
@property
def full_name(self):
"""Get the full name of the user."""
if not self.last_name:
return self.first_name
return f"{self.first_name} {self.last_name}"
# Path: utils/loguru_logging.py
class InterceptHandler(logging.Handler):
def emit(self, record):
# Path: middlewares/message_logging_middleware.py
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from arrow import arrow
from models import Message, User
from utils.loguru_logging import logger
"""The middleware to log all the incoming messages into the database."""
class MessagesLoggingMiddleware(BaseMiddleware):
"""The middleware class, inherited from `BaseMiddleware`."""
@staticmethod
async def _save_message(msg: types.Message) -> Message:
"""Save the message into the database."""
if msg.reply_to_message:
reply_to_message = await Message.get_or_none(
message_id=msg.reply_to_message.message_id,
chat_id=msg.chat.id, # `message_id` is not unique. For details, see `models.py`.
)
else:
reply_to_message = None
return await Message.create(
# Primary fields
message_id=msg.message_id,
from_user_id=msg.from_user.id,
chat_id=msg.chat.id,
text=msg.text,
date=msg.date,
# Other fields that might be useful
reply_to_message=reply_to_message,
content_type=msg.content_type,
complete_message_json=msg.as_json(),
)
async def on_pre_process_message(self, msg: types.Message, *_, **__):
"""Save the message into the database _before_ processing it."""
user_data: dict = msg.from_user.to_python()
try:
# Create a user first, if not exist. Otherwise, we are unable to create a message
# with a foreign key.
user, created = await User.get_or_create(id=user_data.pop("id"), defaults=user_data)
if created:
if payload := msg.get_args():
user.start_payload = payload
await user.save()
| logger.info( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RobertCsordas/moe_layer
# Path: triton_src/moe_layer/cvmm.py
def cvmm(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if not isinstance(sel, CVMMSel):
sel = cvmm_prepare_sel(sel, keys.shape[0])
return CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, sel.reduction_weight)
# Path: triton_src/moe_layer/cvmm.py
def cvmm_prepare_sel2(sel: torch.Tensor, w: Optional[torch.Tensor] = None) -> CVMMSel:
# Has multiple selections for each batch element
n_per_batch = sel.shape[-1]
# indices = torch.arange(sel.nelement() // n_per_batch, device=sel.device, dtype=torch.int32)
# indices = indices.repeat_interleave(n_per_batch).flatten()
fsel = sel.flatten()
ssel, sel_index = fsel.sort()
# in_index = indices[sel_index]
in_index = sel_index // n_per_batch
return CVMMSel(sel, ssel.view_as(sel), in_index, sel_index, w)
# Path: triton_src/moe_layer/cvmm.py
class CVMMSel:
raw_sel: torch.Tensor
sel: torch.Tensor
sel_index: torch.Tensor
out_index: Optional[torch.Tensor] = None
reduction_weight: Optional[torch.Tensor] = None
def clone(self) -> 'CVMMSel':
return CVMMSel(self.raw_sel, self.sel, self.sel_index, self.out_index, self.reduction_weight)
# Path: triton_src/moe_layer/moe_layer_simple.py
import torch
import torch.distributed
import torch.nn.functional as F
import math
from typing import Tuple, List, Optional
from .cvmm import cvmm, cvmm_prepare_sel2, CVMMSel
def dist_logsumexp(x: torch.Tensor, dim: int, keepdim: bool = False) -> torch.Tensor:
# Calculate numerically stable distributed logsumexp
xmax = x.max(dim=dim, keepdim=True).values
torch.distributed.all_reduce(xmax, op=torch.distributed.ReduceOp.MAX)
xe = (x - xmax).exp().sum(dim=dim, keepdim=True)
torch.distributed.all_reduce(xe, op=torch.distributed.ReduceOp.SUM)
res = (xmax + xe.log())
if not keepdim:
res = res.squeeze(dim)
return res
def log_mean(x: torch.Tensor, dim: int = 0):
if torch.distributed.is_initialized():
xlse = dist_logsumexp(x, dim=dim)
# Normalize
n = torch.tensor(x.shape[dim]).to(x.device)
torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)
return xlse - n.log()
else:
return x.logsumexp(dim) - math.log(x.shape[dim])
def entropy_l(l: torch.Tensor) -> torch.Tensor:
return - (l * l.exp()).sum(-1)
class MoE(torch.nn.Module):
def __init__(self, dmodel: int, n_experts: int, expert_size: int, k: int,
dropout: float = 0, selection_mode: str = "sigmoid",
activation_after_topk: bool = False,
activation=F.relu,
bias: bool = False, v_dim: Optional[int] = None,
sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,
weight_std_scale: float = 1.0):
super().__init__()
self.k_dim = dmodel
self.v_dim = v_dim if v_dim is not None else dmodel
self.n_experts = n_experts
self.expert_size = expert_size
self.size = self.n_experts * self.expert_size
self.dropout = dropout
self.selection_mode = selection_mode
self.k_vec_dim = self.k_dim
self.n_heads = k
self.activation_after_topk = activation_after_topk
self.activation = activation
self.sinkhorn_n_iters = sinkhorn_n_iters
self.expert_dropout = expert_dropout
if self.selection_mode not in {"softmax", "sigmoid", "sinkmoid"}:
raise ValueError(f"Unknown selection mode {self.selection_mode}")
self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))
self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))
self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))
torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_std_scale)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))
self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))
else:
self.bias = None
self.o_bias = None
self.renorm_keep_std(self.expert_sel, dim=1)
def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):
with torch.no_grad():
std = weight.std()
weight.div_(weight.norm(dim=dim, keepdim=True))
weight.mul_(std / weight.std())
def entropy_reg(self, sel: torch.Tensor) -> float:
# Everything is done in log scale
sel = sel.flatten(0, -2)
sel = F.log_softmax(sel, dim=-1)
sel = log_mean(sel, -2)
return - entropy_l(sel).mean()
def compute_scores(self, input: torch.Tensor, index: CVMMSel) -> torch.Tensor:
| scores = cvmm(input, index, self.keys) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: meanii/downly
# Path: downly/downly.py
class Downly(Client):
"""
Downly 🦉
"""
def __init__(self):
name = self.__class__.__name__.lower()
self.telegram = telegram
super().__init__(
name,
api_id=self.telegram.get('api_id'),
api_hash=self.telegram.get('api_hash'),
bot_token=self.telegram.get('bot_token'),
workdir=str(Path.cwd()),
workers=16,
plugins=dict(
root=f"{name}.plugins",
),
sleep_threshold=180
)
self.uptime_reference = time.monotonic_ns()
self.start_datetime = datetime.utcnow()
async def start(self):
await super().start()
me = await self.get_me()
bot.username = me.username
bot.id = me.id
logger.info(f"Downly 🦉 v{__version__} (Layer {layer}) started on @{me.username}. Hi.")
async def stop(self, *args):
await super().stop()
logger.info("Downly 🦉 stopped. Bye.")
# Path: downly/utils/b_logger.py
def b_logger(func):
async def wrapper(client, message: Message):
# checking if a message is url then log
if not validate_url(message.text):
await func(client, message)
return
# logging message
if message.from_user: # if a message is from a user
logger.info(f"New message from {message.from_user.first_name}({message.from_user.id})"
f" in {message.chat.title}({message.chat.id}) -"
f" [MESSAGE]: {message.text}")
if message.from_user is None: # if a message is from channel
logger.info(f"New message from {message.chat.title}({message.chat.id}) -"
f" [MESSAGE]: {message.text}")
return await func(client, message)
return wrapper
# Path: downly/database/users_sql.py
def update_user(user_id: int, username: str):
with INSERTION_LOCK:
user = SESSION.query(Users).get(user_id)
if not user:
user = Users(user_id, username)
logger.info(f'[DB]: adding new user to db {user_id} ({username})')
SESSION.add(user)
SESSION.flush()
else:
user.username = username
SESSION.commit()
# Path: downly/database/users_sql.py
def update_chat(chat_id: str, chat_name: str):
with INSERTION_LOCK:
chat = SESSION.query(Chats).get(str(chat_id))
if not chat:
chat = Chats(chat_id, chat_name)
logger.info(f'[DB]: adding new chat to db {chat_id} ({chat_name})')
SESSION.add(chat)
SESSION.flush()
else:
chat.chat_name = chat_name
SESSION.commit()
# Path: downly/plugins/logger.py
from pyrogram import filters, Client
from pyrogram.types import Message
from pyrogram.enums import ChatType
from downly.downly import Downly
from downly.utils.b_logger import b_logger
from downly.database.users_sql import update_user, update_chat
@Downly.on_message(filters.private | filters.group | filters.channel, group=2)
@b_logger
async def logger(client: Client, message: Message):
# check if a message is command then do nothing
if message.chat.type == ChatType.GROUP or message.chat.type == ChatType.SUPERGROUP:
update_chat(str(message.chat.id), message.chat.title)
if message.from_user:
| update_user(message.from_user.id, message.from_user.username) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hnesk/flipper-raw-rfid
# Path: flipper_raw_rfid/utils.py
def batched(iterable: Iterable[Any], n: int) -> Iterable[tuple[Any, ...]]:
# batched('ABCDEFG', 3) --> ABC DEF G
if n < 1:
raise ValueError('n must be at least one')
it = iter(iterable)
while batch := tuple(islice(it, n)):
yield batch
# Path: flipper_raw_rfid/utils.py
class Peak:
"""
A peak in a distribution described by left, center and right index
"""
left: int = field(compare=False)
center: int = field(compare=False)
right: int = field(compare=False)
height: float = field(default=0.0, repr=False)
def merge(self, other: Peak) -> Peak:
"""
Merge this peak with another peak
:param other: Peak to merge with
:return: merged peak
"""
return Peak(
min(self.left, other.left),
(self.center + other.center) // 2,
max(self.right, other.right),
max(self.height, other.height)
)
def slice(self, distribution: npt.NDArray[Any]) -> npt.NDArray[Any]:
"""
Slice the distribution with the peak
:param distribution:
:return:
"""
return distribution[self.left:self.right]
def fit(self, distribution: npt.NDArray[Any], quantile: float = 1.0) -> Peak:
"""
Fit the distribution to the peak
:param distribution:
:param quantile:
:return:
"""
my_excerpt = distribution[self.left:self.right]
if quantile < 1.0:
to_capture = numpy.sum(my_excerpt) * quantile
def objective(thr: float) -> float:
# 1.0 for capturing enough and a little nudge to find bigger thresholds
return cast(float, 1.0 * (to_capture > numpy.sum(my_excerpt[my_excerpt > thr])) - thr * 0.0001)
res = minimize_scalar(objective, (0, my_excerpt.max()))
threshold = int(res.x)
else:
threshold = 0
first, *_, last = (my_excerpt > threshold).nonzero()[0]
return Peak(
self.left + first - 1,
self.left + (first + last) // 2,
self.left + last + 1,
my_excerpt[first:last].max()
)
def __contains__(self, v: float | int) -> bool:
"""
Check if a value is inside the peak
:param v: value to check
:return:
"""
return self.left <= v <= self.right
# Path: flipper_raw_rfid/bits.py
import re
import numpy
import numpy.typing as npt
from flipper_raw_rfid.utils import batched, Peak
"""
Utilities for working with bitstreams
"""
def decode_lengths(pads: npt.NDArray[numpy.int64], peaks: list[Peak]) -> tuple[npt.NDArray[numpy.int8], int]:
"""
Loops through pulses and durations and matches them to peaks
Checks for the length of the peak as a multiple of the first peak and adds as many 1/0 to the result
:param pads: Pulse and duration values
:param peaks: A list of peaks from find_peaks, the center frequencies should be more or less multiples of the first peak
:return: The decoded bitstream
"""
result: list[int] = []
position = 0
result_position = None
first_length = peaks[0].center
for high, duration in pads:
low = duration - high
high_peak = None
low_peak = None
for p in peaks:
if high in p:
high_peak = p
if low in p:
low_peak = p
if high_peak and low_peak:
break
if not (high_peak and low_peak):
if not high_peak:
print(f'Found nothing for high {high}, restarting')
if not low_peak:
print(f'Found nothing for low {low}, restarting')
result = []
result_position = position
continue
result.extend([1] * int(round(high_peak.center / first_length)))
result.extend([0] * int(round(low_peak.center / first_length)))
position += duration
return numpy.array(result, dtype=numpy.int8), result_position
def decode_manchester(manchester: npt.NDArray[numpy.int8], biphase: bool = True) -> npt.NDArray[numpy.int8]:
"""
Decode manchester encoded bitstream
:param manchester: manchester encoded bitstream
:param biphase: True for biphase, False for diphase
:return: decoded bitstream
"""
if manchester[0] == manchester[1]:
manchester = manchester[1:]
result = []
| for pair in batched(manchester, 2): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xingchenshanyao/YOLOP-E
# Path: lib/utils/utils.py
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
# Path: lib/utils/augmentations.py
def letterbox_for_img(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
# Path: lib/dataset/DemoDataset.py
import glob
import os
import random
import shutil
import time
import cv2
import math
import numpy as np
import torch
from pathlib import Path
from threading import Thread
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from ..utils import letterbox_for_img, clean_str
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
h0, w0 = img0.shape[:2]
self.frame += 1
print('\n video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) # BGR
#img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: \n' % (self.count, self.nf, path), end='')
h0, w0 = img0.shape[:2]
# Padded resize # 填充尺寸,640*360*3 -> 640*384*3
| img, ratio, pad = letterbox_for_img(img0, new_shape=self.img_size, auto=True) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: godisboy0/nonebot-adapter-wcf
# Path: adapters/wechatferry/exception.py
class ApiNotAvailable(BaseApiNotAvailable, WechatFerryAdapterException):
"""API 连接不可用"""
# Path: adapters/wechatferry/basemodel.py
class UserInfo():
def __init__(self, wx_id: str, code: str, wx_name: str, gender: str):
self.wx_id = wx_id # 微信id,原始id。会被作为真正的user_id
self.code = code # code 微信允许改id后,新改的id的code
self.wx_name = wx_name # 微信昵称
self.gender = gender # 性别
def __str__(self) -> str:
return f"wx_id: {self.wx_id}, code: {self.code}, wx_name: {self.wx_name}, gender: {self.gender or ''}"
# Path: adapters/wechatferry/sqldb.py
class database:
def __init__(self, file_path, db_name="wcf") -> None:
## 如果同参数
global singleton_dict
if hasattr(singleton_dict, file_path):
self.conn = getattr(singleton_dict, file_path)
return
if not file_path:
raise ValueError("file_path can not be empty")
if not os.path.exists(file_path):
os.makedirs(file_path, exist_ok=True)
datafile = os.path.join(file_path, db_name)
self.conn = sqlite3.connect(datafile)
singleton_dict.file_path = self.conn
def create_table(self, sql: str) -> None:
cursor = self.conn.cursor()
try:
cursor.execute(sql)
self.conn.commit()
except Exception as e:
logger.error(f"Failed to create table: {e}")
raise e
finally:
cursor.close()
def query(self, sql, *args) -> list:
cursor = self.conn.cursor()
try:
cursor.execute(sql, args)
return cursor.fetchall()
except Exception as e:
logger.error(f"Failed to query: {e}")
raise e
finally:
cursor.close()
def execute(self, sql: str, *args) -> None:
cursor = self.conn.cursor()
try:
cursor.execute(sql, args)
self.conn.commit()
except Exception as e:
logger.error(f"Failed to execute: {e}")
raise e
finally:
cursor.close()
def insert(self, sql: str, *args) -> None:
cursor = self.conn.cursor()
try:
cursor.execute(sql, args)
self.conn.commit()
except Exception as e:
logger.error(f"Failed to insert: {e}")
raise e
finally:
cursor.close()
def update(self, sql: str, *args) -> None:
cursor = self.conn.cursor()
try:
cursor.execute(sql, args)
self.conn.commit()
except Exception as e:
logger.error(f"Failed to update: {e}")
raise e
finally:
cursor.close()
def delete(self, sql: str, *args) -> None:
cursor = self.conn.cursor()
try:
cursor.execute(sql, args)
self.conn.commit()
except Exception as e:
logger.error(f"Failed to delete: {e}")
raise e
finally:
cursor.close()
def table_exists(self, table_name: str) -> bool:
sql = f"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{table_name}'"
cursor = self.conn.cursor()
try:
cursor.execute(sql)
return cursor.fetchone()[0] == 1
except Exception as e:
logger.error(f"Failed to check table {table_name} exists: {e}")
return False
finally:
cursor.close()
# Path: adapters/wechatferry/utils.py
class Logger:
class downloader:
def __init__(self) -> None:
def info(self, msg: str, e: Exception=None) -> None:
def error(self, msg: str, e: Exception=None) -> None:
def debug(self, msg: str, e: Exception=None) -> None:
def warning(self, msg: str, e: Exception=None) -> None:
def handle_api_result(result: Optional[Dict[str, Any]]) -> Any:
def file_md5(file_path) -> Optional[str]:
def __init__(self, url, file_name, path: str, override: bool = True, chunk_size: int = 1024, headers={}) -> None:
async def downloadAsync(self) -> str:
def download(self) -> str:
# Path: adapters/wechatferry/config.py
class AdapterConfig(BaseModel):
"""wechatferry 配置类"""
root_user: str
debug: bool = Field(default=True)
"""是否开启调试模式"""
db_path: str = Field(default="./data")
"""数据库路径,默认为当前运行路径下的 data 文件夹,该文件夹已经被 .gitignore 忽略"""
echo_root_msg: bool = Field(default=False)
"""是否将 root_user 的信息直接做成json回传给root_user"""
"""在debug时非常有用,特别是你的开发机器和部署微信的机器不是同一台时。用过的都说好"""
class Config:
extra = "ignore"
# Path: adapters/wechatferry/api.py
from wcferry import Wcf
from typing import Any
from .exception import ApiNotAvailable
from concurrent.futures import ThreadPoolExecutor
from .basemodel import UserInfo
from .sqldb import database
from .utils import file_md5, logger
from .config import AdapterConfig
import asyncio
"""
所有的 api 都定义在这里。
call_api 的所有方法最终都会调用这里的方法。
"""
"""
发现绝大多数插件都是为 onebot.v11 所写,为了更好的复用(白嫖),这里也用 onebot.v11 中相关的数据结构。
参数约定:
to_wx_id: 群聊时为群聊id, 非群聊时为用户id
"""
user_cache = {}
md5_executor = ThreadPoolExecutor(max_workers=1)
class API:
| def __init__(self, wcf: Wcf, config: AdapterConfig): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: R1999RC-official/Reverse1999ResonanceCalculator
# Path: python/python_env/Lib/site-packages/setuptools/warnings.py
class SetuptoolsWarning(UserWarning):
"""Base class in ``setuptools`` warning hierarchy."""
@classmethod
def emit(
cls,
summary: Optional[str] = None,
details: Optional[str] = None,
due_date: Optional[_DueDate] = None,
see_docs: Optional[str] = None,
see_url: Optional[str] = None,
stacklevel: int = 2,
**kwargs,
):
"""Private: reserved for ``setuptools`` internal use only"""
# Default values:
summary_ = summary or getattr(cls, "_SUMMARY", None) or ""
details_ = details or getattr(cls, "_DETAILS", None) or ""
due_date = due_date or getattr(cls, "_DUE_DATE", None)
docs_ref = see_docs or getattr(cls, "_SEE_DOCS", None)
docs_url = docs_ref and f"https://setuptools.pypa.io/en/latest/{docs_ref}"
see_url = see_url or getattr(cls, "_SEE_URL", None)
due = date(*due_date) if due_date else None
text = cls._format(summary_, details_, due, see_url or docs_url, kwargs)
if due and due < date.today() and _should_enforce():
raise cls(text)
warnings.warn(text, cls, stacklevel=stacklevel + 1)
@classmethod
def _format(
cls,
summary: str,
details: str,
due_date: Optional[date] = None,
see_url: Optional[str] = None,
format_args: Optional[dict] = None,
):
"""Private: reserved for ``setuptools`` internal use only"""
today = date.today()
summary = cleandoc(summary).format_map(format_args or {})
possible_parts = [
cleandoc(details).format_map(format_args or {}),
(
f"\nBy {due_date:%Y-%b-%d}, you need to update your project and remove "
"deprecated calls\nor your builds will no longer be supported."
if due_date and due_date > today
else None
),
(
"\nThis deprecation is overdue, please update your project and remove "
"deprecated\ncalls to avoid build errors in the future."
if due_date and due_date < today
else None
),
(f"\nSee {see_url} for details." if see_url else None),
]
parts = [x for x in possible_parts if x]
if parts:
body = indent(_TEMPLATE.format(details="\n".join(parts)), _INDENT)
return "\n".join([summary, "!!\n", body, "\n!!"])
return summary
# Path: python/python_env/Lib/site-packages/setuptools/warnings.py
class SetuptoolsDeprecationWarning(SetuptoolsWarning):
"""
Base class for warning deprecations in ``setuptools``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
# Path: python/python_env/Lib/site-packages/setuptools/config/_apply_pyprojecttoml.py
import logging
import os
from collections.abc import Mapping
from email.headerregistry import Address
from functools import partial, reduce
from itertools import chain
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
cast,
)
from ..warnings import SetuptoolsWarning, SetuptoolsDeprecationWarning
from setuptools._importlib import metadata # noqa
from setuptools.dist import Distribution # noqa
from setuptools.config import expand
from setuptools.config import expand
from setuptools.extern.packaging.specifiers import SpecifierSet
from .._importlib import metadata
from setuptools.dist import Distribution
"""Translation layer between pyproject config and setuptools distribution and
metadata objects.
The distribution and metadata objects are modeled after (an old version of)
core metadata, therefore configs in the format specified for ``pyproject.toml``
need to be processed before being applied.
**PRIVATE MODULE**: API reserved for setuptools internal usage only.
"""
if TYPE_CHECKING:
EMPTY: Mapping = MappingProxyType({}) # Immutable dict-like
_Path = Union[os.PathLike, str]
_DictOrStr = Union[dict, str]
_CorrespFn = Callable[["Distribution", Any, _Path], None]
_Correspondence = Union[str, _CorrespFn]
_logger = logging.getLogger(__name__)
def apply(dist: "Distribution", config: dict, filename: _Path) -> "Distribution":
"""Apply configuration dict read with :func:`read_configuration`"""
if not config:
return dist # short-circuit unrelated pyproject.toml file
root_dir = os.path.dirname(filename) or "."
_apply_project_table(dist, config, root_dir)
_apply_tool_table(dist, config, filename)
current_directory = os.getcwd()
os.chdir(root_dir)
try:
dist._finalize_requires()
dist._finalize_license_files()
finally:
os.chdir(current_directory)
return dist
def _apply_project_table(dist: "Distribution", config: dict, root_dir: _Path):
project_table = config.get("project", {}).copy()
if not project_table:
return # short-circuit
_handle_missing_dynamic(dist, project_table)
_unify_entry_points(project_table)
for field, value in project_table.items():
norm_key = json_compatible_key(field)
corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key)
if callable(corresp):
corresp(dist, value, root_dir)
else:
_set_config(dist, corresp, value)
def _apply_tool_table(dist: "Distribution", config: dict, filename: _Path):
tool_table = config.get("tool", {}).get("setuptools", {})
if not tool_table:
return # short-circuit
for field, value in tool_table.items():
norm_key = json_compatible_key(field)
if norm_key in TOOL_TABLE_DEPRECATIONS:
suggestion, kwargs = TOOL_TABLE_DEPRECATIONS[norm_key]
msg = f"The parameter `{norm_key}` is deprecated, {suggestion}"
| SetuptoolsDeprecationWarning.emit( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Summaw/genCraft-imageGen
# Path: modules/write/write.py
def write(text: str, case: str) -> None:
current_time = time.strftime("%H:%M:%S", time.localtime())
switcher = {
'info': _write_info,
'success': _write_success,
'error': _write_error
}
func = switcher.get(case.lower(), lambda x, y: None)
func(current_time, text)
# Path: modules/tasks/login.py
async def login_attempt():
headers = {
"Host": "api.gencraft.com",
"Connection": "keep-alive",
"Content-Length": "94",
"X-Csrf-Protection":"1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 OPR/102.0.0.0",
"Content-Type": "application/json",
"Accept": "application/json, text/plain, */*",
"X-WEB-TOKEN": "YOURWEBTOKENHERE (WILL AUTOMATE THIS AT A LATER DATE)",
"Origin": "https://gencraft.com",
"Referer": "https://gencraft.com/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9"
}
'''
"first_name": "your google account firstname",
"last_name": "your google account lastname",
"auth_provider": "google",
"timezone": "America/New_York" #or your timezone
'''
data = {
"first_name": "John",
"last_name": "Doe",
"auth_provider": "google",
"timezone": "America/New_York"
}
login_request = requests.post("https://api.gencraft.com/api/v5/user/login", headers=headers, json=data, timeout=20)
if "SESSION_ID" in login_request.cookies:
# Get the value of the "SESSION_ID" cookie
session_id = login_request.cookies["SESSION_ID"]
else:
write("SESSION_ID cookie not found in the response.", 'error')
if login_request.status_code == 200:
return session_id
else:
return 'False'
# Path: modules/tasks/generateImage.py
async def generate_image(sessionId):
time.sleep(2)
cookies = {
'SESSION_ID': f'{sessionId}',
}
headers = {
'authority': 'api.gencraft.com',
'accept': 'application json, text plain, */*',
'accept-language': 'en-US,en;q=0.9',
'content-type': 'application/json',
'origin': 'https://gencraft.com',
'referer': 'https://gencraft.com/',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 OPR/102.0.0.0',
'x-csrf-protection': '1',
}
# Model styles
# art_style 1 = 3D Style
# art_style 2 = Anime Style
# art_style 14 = CyberPunk Style
# art_style 9 = Realistic Style
# art_style 29 = Video Game Style
# art_style 17 = Isometric Style
json_data = {
'prompt_text': 'A blue and gold macaw chilling on a tree overviewing the rainforest', # Give the model a custom prompt here
'art_style_id': 9,
'negative_prompt_text': '',
'media_type': 'image',
'model_id': 1,
'width': 1024,
'height': 1024,
}
generate_image_request = requests.post('https://api.gencraft.com/api/v5/prompt/generate', cookies=cookies, headers=headers,
json=json_data, timeout=30)
print(generate_image_request.text)
if generate_image_request.status_code == 400:
write('Daily limit reached. Please use a different X-WEB-TOKEN to continue generating images', 'error')
else:
response_json = json.loads(generate_image_request.text)
if "data" in response_json and "images" in response_json["data"]:
images = response_json["data"]["images"]
image_urls = [image["url"] for image in images if "url" in image]
structured_data = {
"prompt": {
"prompt_text": json_data["prompt_text"],
"art_style_id": json_data["art_style_id"],
},
"urls": image_urls,
}
for url in image_urls:
write(f"Image Generated: {url}", "success")
with open("data/generated.json", "a") as json_file:
json_file.write("\n")
json.dump(structured_data, json_file, indent=4)
# Path: main.py
import time
import asyncio
import requests
from modules.write.write import write
from modules.tasks.login import login_attempt
from modules.tasks.generateImage import generate_image
async def start():
loginRequest = await login_attempt()
if loginRequest == 'False':
write("There was a problem logging in.", "error")
else:
write(f"Session ID: {loginRequest}", 'info')
| await generate_image(loginRequest)
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mentpy/mentpy
# Path: mentpy/gradients/_finite_difference.py
def fd_gradient(f, x, h=1e-5, type="central"):
if type not in ["central", "forward", "backward"]:
raise UserWarning(
f"Expected type to be 'central', 'forward', or 'backward' but {type} was given"
)
grad = np.zeros(len(x))
for i in range(len(x)):
if type == "central":
grad[i] = (f(x + h * np.eye(len(x))[i]) - f(x - h * np.eye(len(x))[i])) / (
2 * h
)
elif type == "forward":
grad[i] = (f(x + h * np.eye(len(x))[i]) - f(x)) / h
elif type == "backward":
grad[i] = (f(x) - f(x - h * np.eye(len(x))[i])) / h
return grad
# Path: mentpy/gradients/_finite_difference.py
def fd_hessian(f, x, h=1e-5, type="central"):
if type not in ["central", "forward", "backward"]:
raise UserWarning(
f"Expected type to be 'central', 'forward', or 'backward' but {type} was given"
)
hess = np.zeros((len(x), len(x)))
for i in range(len(x)):
for j in range(len(x)):
if type == "central":
hess[i, j] = (
f(x + h * np.eye(len(x))[i] + h * np.eye(len(x))[j])
- f(x + h * np.eye(len(x))[i] - h * np.eye(len(x))[j])
- f(x - h * np.eye(len(x))[i] + h * np.eye(len(x))[j])
+ f(x - h * np.eye(len(x))[i] - h * np.eye(len(x))[j])
) / (4 * h**2)
elif type == "forward":
hess[i, j] = (
f(x + h * np.eye(len(x))[i] + h * np.eye(len(x))[j])
- f(x + h * np.eye(len(x))[i])
- f(x + h * np.eye(len(x))[j])
+ f(x)
) / h**2
elif type == "backward":
hess[i, j] = (
f(x)
- f(x - h * np.eye(len(x))[i])
- f(x - h * np.eye(len(x))[j])
+ f(x - h * np.eye(len(x))[i] - h * np.eye(len(x))[j])
) / h**2
return hess
# Path: mentpy/gradients/_parameter_shift.py
def psr_gradient(cost, x, shift=1.5):
"""Calculate the gradient of a cost function using the parameter shift rule.
Args:
cost (callable): Cost function to calculate the gradient of.
x (array): Input to the cost function.
shift (float, optional): Shift to use in the parameter shift rule. Defaults to 1.5.
Returns:
array: Gradient of the cost function.
"""
grad = np.zeros(len(x))
for i in range(len(x)):
grad[i] = (
cost(x + shift * np.eye(len(x))[i]) - cost(x - shift * np.eye(len(x))[i])
) / (2 * shift)
return grad
# Path: mentpy/gradients/_parameter_shift.py
def psr_hessian(cost, x, shift=1.5):
"""Calculate the Hessian of a cost function using the parameter shift rule.
Args:
cost (callable): Cost function to calculate the Hessian of.
x (array): Input to the cost function.
shift (float, optional): Shift to use in the parameter shift rule. Defaults to 1.5.
Returns:
array: Hessian of the cost function.
"""
hess = np.zeros((len(x), len(x)))
for i in range(len(x)):
for j in range(len(x)):
hess[i, j] = (
cost(x + shift * np.eye(len(x))[i] + shift * np.eye(len(x))[j])
- cost(x + shift * np.eye(len(x))[i] - shift * np.eye(len(x))[j])
- cost(x - shift * np.eye(len(x))[i] + shift * np.eye(len(x))[j])
+ cost(x - shift * np.eye(len(x))[i] - shift * np.eye(len(x))[j])
) / (4 * shift**2)
return hess
# Path: mentpy/gradients/grad.py
import numpy as np
from ._finite_difference import fd_gradient, fd_hessian
from ._parameter_shift import psr_gradient, psr_hessian
# Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""Module that contains functions to calculate gradients of cost functions."""
__all__ = ["get_gradient", "get_hessian"]
def get_gradient(cost, x, method="parameter-shift", *args, **kwargs):
"""Calculate the gradient of a cost function.
Args:
cost (callable): Cost function to calculate the gradient of.
x (array): Input to the cost function.
method (str, optional): Method to use to calculate the gradient. Defaults to 'parameter-shift'.
Returns:
array: Gradient of the cost function.
"""
match method:
case "parameter-shift" | "psr" | "parametershift":
| return psr_gradient(cost, x, *args, **kwargs) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rnag/cert-hero
# Path: cert_hero/cert_hero.py
def certs_please(
hostnames: list[str] | tuple[str] | set[str],
context: ssl.SSLContext = None,
num_threads: int = 25,
user_agent: str | None = _DEFAULT_USER_AGENT,
) -> dict[str, CertHero]:
"""
Retrieve (concurrently) the SSL certificate(s) for a list of ``hostnames`` - works
even in the case of expired or self-signed certificates.
Usage:
>>> import cert_hero, json
>>> host_to_cert = cert_hero.certs_please(['google.com', 'cnn.com', 'www.yahoo.co.in', 'youtu.be'])
>>> cert_hero.set_expired(host_to_cert)
>>> host_to_cert
{'google.com': CertHero(
{
"Cert Status": "SUCCESS",
"Serial": "753DD6FF20CB1B4510CB4C1EA27DA2EB",
...
}
), 'cnn.com': CertHero(
{
"Cert Status": "SUCCESS",
"Serial": "7F2F3E5C350554D71A6784CCFE6E8315",
...
}
), ...
}
>>> json.dumps(host_to_cert)
{"google.com": {"Cert Status": "SUCCESS", ...}, "cnn.com": {"Cert Status": "SUCCESS", ...}, ...}
:param hostnames: List of hosts to retrieve SSL Certificate(s) for
:param context: (Optional) Shared SSL Context
:param num_threads: Max number of concurrent threads
:param user_agent: A custom *user agent* to use for the HTTP call to retrieve ``Location`` and ``Status``.
Defaults to ``python-requests/{version}``, or a random *user agent* if the ``fake_useragent`` module
is installed (via the ``fake-ua``
`extra <https://packaging.python.org/en/latest/tutorials/installing-packages/#installing-extras>`__).
:return: A mapping of ``hostname`` to the SSL Certificate (e.g. :class:`CertHero`) for that host
"""
if context is None:
context = create_ssl_context()
if num_hosts := len(hostnames):
# We can use a with statement to ensure threads are cleaned up promptly
with ThreadPoolExecutor(
max_workers=min(num_hosts, num_threads)
) as pool:
_host_to_cert = {
# TODO: Update to remove `or` once we finalize how to handle missing certs
host: cert_info or _build_failed_cert('TIMED_OUT')
for host, cert_info in zip(
hostnames,
pool.map(
cert_please,
hostnames,
repeat(context),
repeat(user_agent),
),
)
}
else:
_host_to_cert = {}
return _host_to_cert
# Path: cert_hero/cert_hero.py
def set_expired(certs: CertHero
| dict[str, str | int | dict[str, str | bool]]
| dict[str, CertHero]
| dict[str, dict[str, str | int | dict[str, str | bool]]]
| Iterable[CertHero]
| Iterable[dict[str, str | int | dict[str, str | bool]]]
| None,
_date_from_iso_str=date.fromisoformat) -> None:
"""
Set or update the value for ``Validity > Expired`` (:type:`bool`) on
each cert in a response from :func:`cert_please()` or :func:`certs_please()`,
or a serialized version thereof (e.g. ``json.dumps`` > ``json.loads``).
Example Usage::
>>> from cert_hero import cert_please, set_expired
>>> cert = cert_please('google.com')
>>> assert 'Expired' not in cert['Validity']
>>> set_expired(cert)
>>> assert 'Expired' in cert['Validity']
"""
if not certs:
return
# cert_please(): given a `CertHero` (or `CertHero`-like) object
if 'Serial' in certs:
certs = [certs]
# certs_please(): given a mapping of `hostname` to `CertHero` (or `CertHero`-like) object
elif values_fn := getattr(certs, 'values', None):
certs = values_fn()
today = datetime.utcnow().date()
for _cert in certs:
if _cert:
if _validity := _cert.get('Validity'):
# Use cached attribute `not_after_date` if available (CertHero),
# else we calculate it on the fly in case of a `dict`.
not_after_date: date = getattr(_cert, '_not_after_date', None) \
or _date_from_iso_str(_validity['Not After'])
# Set the `Validity > Expired` value (bool)
_validity['Expired'] = not_after_date < today
# Path: cert_hero/cli.py
import argparse
import sys
from . import certs_please, set_expired
"""Console script for cert_hero."""
def main():
"""Console script for cert_hero."""
parser = argparse.ArgumentParser(prog='ch', description='Retrieve the SSL certificate(s) for one or more given host')
parser.add_argument('hosts', nargs='*')
args = parser.parse_args()
host_to_cert = certs_please(args.hosts)
| set_expired(host_to_cert) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: KosinskiLab/pyTME
# Path: tme/matching_utils.py
def rigid_transform(
coordinates: NDArray,
rotation_matrix: NDArray,
out: NDArray,
translation: NDArray,
use_geometric_center: bool = False,
coordinates_mask: NDArray = None,
out_mask: NDArray = None,
center: NDArray = None,
) -> None:
"""
Apply a rigid transformation (rotation and translation) to given coordinates.
Parameters
----------
coordinates : NDArray
An array representing the coordinates to be transformed [d x N].
rotation_matrix : NDArray
The rotation matrix to be applied [d x d].
translation : NDArray
The translation vector to be applied [d].
out : NDArray
The output array to store the transformed coordinates.
coordinates_mask : NDArray, optional
An array representing the mask for the coordinates [d x T].
out_mask : NDArray, optional
The output array to store the transformed coordinates mask.
use_geometric_center : bool, optional
Whether to use geometric or coordinate center.
Returns
-------
None
"""
coordinate_dtype = coordinates.dtype
center = coordinates.mean(axis=1) if center is None else center
if not use_geometric_center:
coordinates = coordinates - center[:, None]
np.matmul(rotation_matrix, coordinates, out=out)
if use_geometric_center:
axis_max, axis_min = out.max(axis=1), out.min(axis=1)
axis_difference = axis_max - axis_min
translation = np.add(translation, center - axis_max + (axis_difference // 2))
else:
translation = np.add(translation, np.subtract(center, out.mean(axis=1)))
out += translation[:, None]
if coordinates_mask is not None and out_mask is not None:
if not use_geometric_center:
coordinates_mask = coordinates_mask - center[:, None]
np.matmul(rotation_matrix, coordinates_mask, out=out_mask)
out_mask += translation[:, None]
if not use_geometric_center and coordinate_dtype != out.dtype:
np.subtract(out.mean(axis=1), out.astype(int).mean(axis=1), out=translation)
out += translation[:, None]
# Path: tme/matching_utils.py
def euler_to_rotationmatrix(angles: Tuple[float]) -> NDArray:
"""
Convert Euler angles to a rotation matrix.
Parameters
----------
angles : tuple
A tuple representing the Euler angles in degrees.
Returns
-------
NDArray
The generated rotation matrix.
"""
if len(angles) == 1:
angles = (angles, 0, 0)
rotation_matrix = (
Rotation.from_euler("zyx", angles, degrees=True).as_matrix().astype(np.float32)
)
return rotation_matrix
# Path: tme/matching_optimization.py
from typing import Tuple, Dict
from abc import ABC, abstractmethod
from numpy.typing import NDArray
from scipy.optimize import (
differential_evolution,
LinearConstraint,
basinhopping,
)
from scipy.ndimage import laplace
from scipy.spatial import KDTree
from .matching_utils import rigid_transform, euler_to_rotationmatrix
import numpy as np
""" Implements various methods for non-exhaustive template matching
based on numerical optimization.
Copyright (c) 2023 European Molecular Biology Laboratory
Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
"""
class MatchCoordinatesToDensity(ABC):
"""
A class to template match coordinate sets.
Parameters
----------
target_coordinates : NDArray
The coordinates of the target.
template_coordinates : NDArray
The coordinates of the template.
target_weights : NDArray
The weights of the target.
template_weights : NDArray
The weights of the template.
sampling_rate : NDArray
The size of the voxel.
template_mask_coordinates : NDArray, optional
The coordinates of the template mask. Default is None.
target_mask_coordinates : NDArray, optional
The coordinates of the target mask. Default is None.
**kwargs : dict, optional
Other keyword arguments.
"""
def __init__(
self,
target_coordinates: NDArray,
template_coordinates: NDArray,
target_weights: NDArray,
template_weights: NDArray,
sampling_rate: NDArray,
template_mask_coordinates: NDArray = None,
target_mask_coordinates: NDArray = None,
**kwargs,
):
target, _, origin = FitRefinement.array_from_coordinates(
target_coordinates, target_weights, sampling_rate
)
self.target_density = target
self.target_origin = origin
self.sampling_rate = sampling_rate
self.template_weights = template_weights
self.template_coordinates = template_coordinates
self.template_coordinates_rotated = np.empty(
self.template_coordinates.shape, dtype=np.float32
)
self.target_mask_density = None
if target_mask_coordinates is not None:
target_mask, *_ = FitRefinement.array_from_coordinates(
coordinates=target_mask_coordinates.astype(np.float32),
weights=np.ones(target_mask_coordinates.shape[1]),
shape=self.target_density.shape,
origin=self.target_origin,
sampling_rate=self.sampling_rate,
)
self.target_mask_density = target_mask
self.template_mask_coordinates = None
self.template_mask_coordinates_rotated = None
if template_mask_coordinates is not None:
self.template_mask_coordinates = template_mask_coordinates
self.template_mask_coordinates_rotated = np.empty(
self.template_mask_coordinates.shape, dtype=np.float32
)
def __call__(self, x: NDArray):
"""
Return the score for a given transformation.
Parameters
----------
x : NDArray
The input transformation parameters.
Returns
-------
float
The negative score from the scoring function.
"""
translation, rotation = x[:3], x[3:]
rotation_matrix = euler_to_rotationmatrix(rotation)
| rigid_transform( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hookla/DreamTeamGPT
# Path: dream_team_gpt/meeting.py
class Meeting:
idea: str
config: Path = None
def __post_init__(self) -> None:
"""Create agents"""
client_factory = ai_client_factory(
AIClientConfig(
client_type=AIClientType.ChatGPT,
model=Models.GPT4,
api_key=os.environ["openai.api_key"],
)
)
if self.config:
sme_dict = parse_yaml_config(self.config)
else:
sme_dict = DEFAULT_SME_DICT
self.smes = [SME(client_factory=client_factory, **d) for d in sme_dict]
self.chairman = Chairman(client_factory, self.smes)
self.refiner = IdeaRefiner(client_factory, "Refiner")
def run(self) -> None:
"""Run the meeting to discuss the idea"""
transcript = Transcript(self.idea)
print_with_wrap(transcript)
refined_idea = self.refiner.refine_idea(self.idea)
transcript.refined_idea = refined_idea
print_with_wrap(refined_idea)
while not self.chairman.decide_if_meeting_over(transcript):
self.run_discussion_round(transcript)
def run_discussion_round(self, transcript: str) -> None:
logger.info("running next discussion round\n")
speaker: SME = self.chairman.decide_next_speaker(transcript)
opinion = speaker.opinion(transcript)
print_with_wrap(f"\033[94m{speaker.name}\033[0m: {opinion}\n")
if opinion.strip().rstrip(".").upper() != NO_COMMENT:
transcript += opinion
# Path: dream_team_gpt/utils/logging.py
def configure_logging(verbose: int = 0) -> None:
logging_levels = {0: "ERROR", 1: "INFO", 2: "DEBUG"}
logger.remove(0)
logger.add(sys.stdout, level=logging_levels.get(verbose, "ERROR"))
logger.add("dream_team_gpt.log", level="DEBUG")
# Path: dream_team_gpt/main.py
from dataclasses import dataclass
from pathlib import Path
from dotenv import load_dotenv
from dream_team_gpt.meeting import Meeting
from dream_team_gpt.utils import configure_logging
import os
import click
@click.command()
@click.option(
"--idea",
"-i",
type=str,
required=True,
help="your idea for the team to discuss. Please use double quotes",
)
@click.option(
"--config",
"-c",
type=click.Path(exists=True),
default=None,
help="yaml file with team personalities details",
)
@click.option("-v", "--verbose", default=1, count=True)
def run_meeting(idea: str, config: Path = None, verbose: int = 1) -> None:
print(idea)
| configure_logging(verbose) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amrahhh/sqla_async_orm_queries
# Path: sqla_async_orm_queries/models.py
class Model(Base):
__abstract__ = True
@classmethod
async def create(cls, data: dict):
async with SessionLocal() as session:
try:
data = cls(**data)
session.add(data)
await session.commit()
return data
except Exception as e:
await session.rollback()
raise e
@classmethod
async def select_one(cls, *args: BinaryExpression):
async with SessionLocal() as session:
result = await session.execute(select(cls).where(*args))
data = result.scalar()
return data
@classmethod
async def select_all(cls, *args: BinaryExpression):
async with SessionLocal() as session:
result = await session.execute(select(cls).where(*args))
data = result.scalars().all()
return data
@classmethod
async def update(cls, data: dict, *args: BinaryExpression):
async with SessionLocal() as session:
try:
query = update(cls).where(*args).values(**data).returning(cls.id)
db_data = await session.execute(query)
db_data = db_data.scalar()
await session.commit()
return db_data
except Exception as e:
await session.rollback()
raise e
@classmethod
async def delete(cls, *args: BinaryExpression):
async with SessionLocal() as session:
try:
query = delete(cls).where(*args)
db_data = await session.execute(query)
await session.commit()
return db_data
except Exception as e:
await session.rollback()
raise e
@classmethod
async def select_with_pagination(
cls, *args: BinaryExpression, page: int = 1, size: int = 10
):
async with SessionLocal() as session:
query = select(cls).where(*args).offset((page - 1) * size).limit(size)
result = await session.execute(query)
data = result.scalars().all()
return data
async def apply(self):
async with SessionLocal() as session:
try:
session.add(self)
await session.commit()
except Exception as e:
await session.rollback()
raise e
@classmethod
async def apply_all(self, models: List[TModels]):
async with SessionLocal() as session:
try:
session.add_all(models)
await session.commit()
except Exception as e:
await session.rollback()
raise e
# Path: sqla_async_orm_queries/models.py
def init_session(session: AsyncSession):
global SessionLocal, INITIALIZED
if isinstance(session, (async_sessionmaker, sessionmaker)) and issubclass(
session.class_, AsyncSession
):
SessionLocal = session
INITIALIZED = True
return True
raise TypeError("You need to use SQLAlchemy `AsyncSession`")
# Path: examples/test.py
import asyncio
from sqlalchemy import Column, String, Integer, and_
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
from sqla_async_orm_queries import Model, init_session
# create your engine
engine = create_async_engine(
"postgresql+asyncpg://test_user:12345@localhost/test_db",
echo=True,
)
# create your SessionLocal
SessionLocal = async_sessionmaker(
expire_on_commit=True,
class_=AsyncSession,
bind=engine,
)
| class Test(Model): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: MeetingAgent/MeetingAgent-Core
# Path: voice_cloning/clone.py
class MyTTS:
def __init__(self):
# Get device
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.tts = TTS("tts_models/en/ljspeech/tacotron2-DDC")
self.use_default_speaker = False
self.speaker_wav = self._get_speaker()
def _get_speaker(self):
# speaker audio file
wav_files = glob.glob("voice_cloning/audio_samples/*.wav")
print("WAV FILES: ", wav_files)
if wav_files:
if self.use_default_speaker:
wav_file = "voice_cloning/audio_samples/default_audio.wav"
else:
wav_file = wav_files[0] if wav_files[0] != "default_audio.wav" else FileNotFoundError("Add your audio.wav to /voice_cloning/audio_samples")
print("WAV FILE: ", wav_file)
return wav_file
def text_to_speech(self, text, output_file):
self.tts.tts_with_vc_to_file(
text,
speaker_wav=self.speaker_wav,
file_path=output_file
)
# Path: meeting_buddy_system/gpt_utils.py
def gpt_4_answer(
messages,
model="gpt-4",
max_tokens=750,
temperature=0.6,
top_p=0.9,
frequency_penalty=1.2,
presence_penalty=0.5,
):
completion_params = {
"model": model,
"messages": messages,
"temperature": temperature,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
"max_tokens": max_tokens,
}
response = openai.ChatCompletion.create(**completion_params)
return response["choices"][0]["message"]["content"]
# Path: meeting_buddy_system/gpt_utils.py
def gpt_3_5_turbo_16k_answer(
messages,
model="gpt-3.5-turbo-16k",
max_tokens=750,
temperature=0.6,
top_p=0.9,
frequency_penalty=1.2,
presence_penalty=0.5,
):
completion_params = {
"model": model,
"messages": messages,
"temperature": temperature,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
"max_tokens": max_tokens,
}
response = openai.ChatCompletion.create(**completion_params)
return response["choices"][0]["message"]["content"]
# Path: meeting_buddy_system/prompts.py
MEETING_BUDDY_MAIN_PROMPT = """
<!-->IMPORTANT CONTEXT<--!>
An answer should be coherent and include some point form arguments.
<!-->IMPORTANT CONTEXT<--!>
Here is context for the meeting: {meeting_context}
Given a question, answer it coherently and several possible points that can be derived from the question.
If the question is simple, like an arithmetic question, no need to further explain any detail. Just give the result with a short explanation of how it was achieved it.
"""
# Path: meeting_buddy_system/prompts.py
EXTRACT_QUERY_PROMPT = """
Given some input text, extract a query from the text. You are to do this in the language of the text.
If no query exists, interpret the text as is and see if a question can be captured from it.
"""
# Path: meeting_buddy.py
import pyaudio
import wave
import whisper
import threading
import time
import pygame
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.switch import Switch
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.uix.textinput import TextInput
from kivy.core.window import Window
from kivy.support import install_twisted_reactor
from gtts import gTTS
from pydub import AudioSegment
from ftlangdetect import detect
from voice_cloning.clone import MyTTS
from meeting_buddy_system.gpt_utils import gpt_4_answer, gpt_3_5_turbo_16k_answer
from meeting_buddy_system.prompts import MEETING_BUDDY_MAIN_PROMPT, EXTRACT_QUERY_PROMPT
# Audio Processing
# GUI
install_twisted_reactor()
# gtts text to speech
# personalized voice text to speech
# Local
recording = False
audio_thread = None
def get_audio() -> None:
global recording
recording = True
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)
frames = []
try:
print("Recording...")
while recording:
data = stream.read(1024)
frames.append(data)
print("Finished recording.")
finally:
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open('meeting_buddy_audio/input_audio.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(44100)
wf.writeframes(b''.join(frames))
wf.close()
def stop_audio() -> None:
global recording
recording = False
def whisper_process_audio(audio_file: str) -> str:
model = whisper.load_model("base") # for multilingual
result = model.transcribe(audio_file)
return result["text"]
def detect_language(text: str) -> str:
cleaned_text = text.replace('\n', ' ')
return detect(text=cleaned_text, low_memory=True)
def gtts_text_to_speech(text: str, output_file='meeting_buddy_audio/output.mp3') -> None:
language = detect_language(text=text)["lang"]
tts = gTTS(text=text, lang=language, slow=False)
tts.save(output_file)
print(f'Audio saved as {output_file}')
def voice_clone_text_to_speech(text: str, output_file='meeting_buddy_audio/output.wav') -> None:
app.tts.text_to_speech(text, output_file)
print(f'Audio saved as {output_file}')
# initialize mixer
pygame.mixer.init()
def play_audio(file_path):
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
def stop_audio_playback():
pygame.mixer.music.stop()
def gpt_pipeline(meeting_context: str, input_text: str) -> str:
"""
Extract query from text and produce the final answer to query.
"""
print("\n\n\n###### EXTRACTING QUERY FROM TEXT ######\n\n\n")
messages = [{"role": "system", "content": EXTRACT_QUERY_PROMPT}, {"role": "user", "content": input_text}]
| query = gpt_3_5_turbo_16k_answer(messages=messages) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: KaichengGroup/FUSE-Flow
# Path: FUSE_Flow/other_modules/utils.py
class AEInit(str, Enum):
zero = 'zero'
xavier = 'xavier'
@classmethod
def get_values(cls):
return tuple(map(lambda c: c.value, cls))
# Path: FUSE_Flow/other_modules/conv_modules/conv_block.py
class ConvBlock(nn.Module):
def __init__(self, conv, c_in, c_out, kernel_size, stride, padding, init, attention_type, attn_red_ratio):
super().__init__()
self.block = nn.Sequential(
nn.BatchNorm2d(c_in),
nn.LeakyReLU(negative_slope=0.2),
conv(c_in, c_out, kernel_size=kernel_size, stride=stride, padding=padding),
)
# initialize weights and biases
if init == AEInit.zero:
self.block[-1].weight.data.zero_()
self.block[-1].bias.data.zero_()
elif init == AEInit.xavier:
for name, param in self.block.named_parameters():
if name.endswith('.bias'):
param.data.fill_(0)
elif name.endswith('.weight'):
if len(param.shape) >= 2:
bound = math.sqrt(6) / math.sqrt(param.shape[0] + param.shape[1])
param.data.uniform_(-bound, bound)
def forward(self, x):
return self.block(x)
# Path: FUSE_Flow/other_modules/gated_resnet.py
class UpsampleBlock(GatedResidualNetBase):
def __init__(self, c_in, c_out, c_hid, n_layers, init, attention_type, attn_red_ratio):
super().__init__(c_in, c_out, c_hid, n_layers, nn.ConvTranspose2d,
4, 2, 1, init, attention_type, attn_red_ratio)
def forward(self, x):
return self.nn(x)
# Path: FUSE_Flow/other_modules/gated_resnet.py
class DownsampleBlock(GatedResidualNetBase):
def __init__(self, c_in, c_out, c_hid, n_layers, init, attention_type, attn_red_ratio):
super().__init__(c_in, c_out, c_hid, n_layers, nn.Conv2d,
4, 2, 1, init, attention_type, attn_red_ratio)
def forward(self, x):
return self.nn(x)
# Path: FUSE_Flow/other_modules/adaptive_unet.py
import math
import pytorch_lightning as pl
import torch
from torch import nn
from FUSE_Flow.other_modules.utils import AEInit
from .conv_modules.conv_block import ConvBlock
from .gated_resnet import UpsampleBlock, DownsampleBlock
class AdaptiveUNet(pl.LightningModule):
"""SR network architecture that uses Residual-in-Residual Dense Blocks.
Implement Figure (3) in ESRGAN paper.
Parameters
----------
d_x : int
Priority dimension (height or width) of input chosen for downstream comparisons.
d_y : int
Priority dimension (height or width) of output chosen for downstream comparisons.
add_depth : int
Additional depth on top of that required based on difference in scale of input and output.
Largest value this value can take is the largest n where input_shape[1]/factor**n is whole and odd.
factor: int
Factor at which data expands or shrinks. Currently only works for factor = 2.
c_in : int
Number of channels of input tensor.
c_hid : int
Number of channels of inner convolutional layers.
n_conv : int
Number of conv layers.
no_skip : bool
To include skip connection between mirrored layers.
attention_type: AttentionType
type of attention implemented in gated conv blocks
attn_red_ratio : float # default 16
Minimum value = 1, Maximum value = c_in, set reduction from 1 to c_in using attn_red_ratio
Smaller attn_red_ratio --> Less Parameters
Hyperparameter to vary capacity and computational cost of SE blocks in the network.
"""
def __init__(self, d_x, d_y, add_depth, factor, c_in, c_hid, n_conv, no_skip,
attention_type, attn_red_ratio):
super().__init__()
self.save_hyperparameters()
self.no_skip = no_skip
# double the number of channels needed if no skip connection
if no_skip:
c_inter = c_hid
else:
c_inter = c_hid//2
# larger of the input and output priority dimension
d_l = max(d_x, d_y)
# larger of the input and output priority dimension
d_s = min(d_x, d_y)
# scale difference between input and output
scale = int(d_l / d_s)
# max depth of U-Net
max_depth = int(math.log(scale, factor) + 1 + add_depth)
# represents dimension size of unwanted depths
denominator = d_l // (factor ** (max_depth - 1))
# number of down-sampling blocks
n_down = math.floor(math.log(d_x / denominator, factor))
# number of up-sampling layers in encoder
n_enc_up = max_depth - 1 - n_down - math.ceil(math.log(scale, factor) % 1)
# number of up-sampling layers in decoder
n_dec_up = math.floor(math.log(d_y / denominator, factor))
# discrepancy between size of input priority dimension and nearest larger multiple of 2
k_up = d_l // (factor ** math.floor(math.log(scale, factor))) - d_s
# discrepancy between size of input priority dimension and nearest smaller multiple of 2
k_down = d_s - d_l // (factor ** math.ceil(math.log(scale, factor)))
# need resizing if data is not multiple of 2
self.need_resizing = k_up or k_down
# encoder
if not no_skip:
c_up = c_inter // (factor ** (n_down+self.need_resizing))
self.up_resizer = nn.Sequential(
*[ConvBlock(nn.ConvTranspose2d, c_in, c_up,
| 3, 1, 1, AEInit.xavier, attention_type, attn_red_ratio)] + |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zytedata/zyte-spider-templates
# Path: zyte_spider_templates/documentation.py
def document_enum(func):
return func
# Path: zyte_spider_templates/spiders/base.py
class BaseSpider(scrapy.Spider):
custom_settings: Dict[str, Any] = {
"ZYTE_API_TRANSPARENT_MODE": True,
"_ZYTE_API_USER_AGENT": f"zyte-spider-templates/{version('zyte-spider-templates')}",
}
metadata: Dict[str, Any] = {
"template": True,
"title": "Base",
"description": "Base template.",
}
_NEXT_PAGE_PRIORITY: int = 100
@classmethod
def from_crawler(cls, crawler: Crawler, *args, **kwargs) -> scrapy.Spider:
spider = super().from_crawler(crawler, *args, **kwargs)
spider.allowed_domains = [parse_url(spider.args.url).netloc]
if spider.args.geolocation:
# We set the geolocation in ZYTE_API_PROVIDER_PARAMS for injected
# dependencies, and in ZYTE_API_AUTOMAP_PARAMS for page object
# additional requests.
for component in ("AUTOMAP", "PROVIDER"):
default_params = spider.settings.getdict(f"ZYTE_API_{component}_PARAMS")
default_params["geolocation"] = spider.args.geolocation
spider.settings.set(
f"ZYTE_API_{component}_PARAMS",
default_params,
priority=ARG_SETTING_PRIORITY,
)
if spider.args.max_requests:
spider.settings.set(
"ZYTE_API_MAX_REQUESTS",
spider.args.max_requests,
priority=ARG_SETTING_PRIORITY,
)
return spider
# Path: zyte_spider_templates/spiders/base.py
class BaseSpiderParams(BaseModel):
url: str = Field(
title="URL",
description="Initial URL for the crawl.",
pattern=r"^https?:\/\/[^:\/\s]+(:\d{1,5})?(\/[^\s]*)*(#[^\s]*)?$",
)
geolocation: Optional[Geolocation] = Field(
title="Geolocation",
description="ISO 3166-1 alpha-2 2-character string specified in "
"https://docs.zyte.com/zyte-api/usage/reference.html#operation/extract/request/geolocation.",
default=None,
json_schema_extra={
"enumMeta": {
code: {
"title": GEOLOCATION_OPTIONS_WITH_CODE[code],
}
for code in Geolocation
}
},
)
max_requests: Optional[int] = Field(
description=(
"The maximum number of Zyte API requests allowed for the crawl.\n"
"\n"
"Requests with error responses that cannot be retried or exceed "
"their retry limit also count here, but they incur in no costs "
"and do not increase the request count in Scrapy Cloud."
),
default=100,
json_schema_extra={
"widget": "request-limit",
},
)
# Path: zyte_spider_templates/spiders/ecommerce.py
from enum import Enum
from typing import Any, Callable, Dict, Iterable, Optional, Union
from pydantic import Field
from scrapy import Request
from scrapy.crawler import Crawler
from scrapy_poet import DummyResponse
from scrapy_spider_metadata import Args
from zyte_common_items import ProbabilityRequest, Product, ProductNavigation
from zyte_spider_templates.documentation import document_enum
from zyte_spider_templates.spiders.base import BaseSpider, BaseSpiderParams
import scrapy
@document_enum
class EcommerceCrawlStrategy(str, Enum):
full: str = "full"
"""Follow most links within the domain of URL in an attempt to discover and
extract as many products as possible."""
navigation: str = "navigation"
"""Follow pagination, subcategories, and product detail pages."""
pagination_only: str = "pagination_only"
"""Follow pagination and product detail pages. SubCategory links are
ignored. Use this when some subCategory links are misidentified by
ML-extraction."""
@document_enum
class ExtractFrom(str, Enum):
httpResponseBody: str = "httpResponseBody"
"""Use HTTP responses. Cost-efficient and fast extraction method, which
works well on many websites."""
browserHtml: str = "browserHtml"
"""Use browser rendering. Often provides the best quality."""
| class EcommerceSpiderParams(BaseSpiderParams): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Bio-OS/bio-mate
# Path: bio_mate/defs.py
def gen_data_url_img(img_path: Path):
base64_utf8_str = base64.b64encode(img_path.read_bytes()).decode("utf-8")
ext = str(img_path).split(".")[-1]
data_url = f"data:image/{ext};base64,{base64_utf8_str}"
return data_url
# Path: bio_mate/defs.py
def get_img(type):
img_data_url = all_defs[type]["sample_img"]
if img_data_url:
return img_data_url
img_name = all_defs[type]["meta"]["sample_img"]
if not img_name:
return
img_path = plot_defs / type / img_name
data_url = gen_data_url_img(img_path)
all_defs[type]["sample_img"] = data_url
return data_url
# Path: bio_mate/defs.py
def list_files(path: str):
custom_path = Path(path)
if not custom_path.exists():
print(f"{path} not exists")
return
return [
{"name": item.name, "is_dir": item.is_dir()} for item in custom_path.iterdir()
]
# Path: bio_mate/defs.py
def prepare_plot_env(params: dict):
now = datetime.utcnow()
time_str = now.strftime("%Y%m%d_%H%M%S_%f")
current_plot = current_file.parent / "log_plot" / time_str
current_plot.mkdir(exist_ok=True, parents=True)
input_json = current_plot / "input.json"
input_json.write_text(json.dumps(params, indent=2))
return current_plot
# Path: bio_mate/BaseWidget.py
from ipywidgets import DOMWidget
from traitlets import Bool, Unicode, Dict, Int
from bio_mate.defs import gen_data_url_img, get_img, list_files, prepare_plot_env
import json
import warnings
import subprocess
module_name = "bio-mate"
module_version = "1.0.0"
class BaseWidget(DOMWidget):
_model_name = Unicode("BaseWidgetModel").tag(sync=True)
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_view_name = Unicode("BaseWidgetView").tag(sync=True)
_view_module = Unicode(module_name).tag(sync=True)
_view_module_version = Unicode(module_version).tag(sync=True)
_view_count = Int(0).tag(sync=True)
type = Unicode("").tag(sync=True)
count = Int(100).tag(sync=True)
all_defs = Dict().tag(sync=True)
def handle_messages(self, widget, content: dict, buffers):
reqId = content.get("reqId", "")
method_name = content.get("method", "")
if not reqId or not method_name:
print(f"Invalid CommRequest: reqId: {reqId}-{method_name}")
return
if not hasattr(self, method_name):
content["response"] = {"status": "failed", "msg": "NotImplementedError"}
self.send(content)
return
func = getattr(self, method_name)
func(content)
def __init__(self, **kwargs):
super(BaseWidget, self).__init__(**kwargs)
# Assign keyword parameters to this object
recognized_keys = dir(self.__class__)
for key, value in kwargs.items():
if key not in recognized_keys and f"_{key}" not in recognized_keys:
warnings.warn(RuntimeWarning(f"Keyword parameter {key} not recognized"))
setattr(self, key, value)
# Attach the callback event handler
self.on_msg(self.handle_messages)
def getSampleImage(self, content: dict):
| content["response"] = {"status": "ok", "result": get_img(self.type)} |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: iamarunbrahma/llm-prompt-testing
# Path: utils.py
@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(6))
def get_embeddings(text, embedding_model="text-embedding-ada-002"):
response = openai.Embedding.create(
model=embedding_model,
input=text,
)
embedding_vectors = response["data"][0]["embedding"]
return embedding_vectors
# Path: utils.py
@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(6))
def get_chat_completion(config, system_prompt, question):
try:
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": question},
]
response = openai.ChatCompletion.create(
model=config["model_name"],
messages=messages,
temperature=config["temperature"],
max_tokens=config["max_tokens"],
top_p=config["top_p"],
frequency_penalty=config["frequency_penalty"],
presence_penalty=config["presence_penalty"],
)
answer = response["choices"][0]["message"]["content"]
answer = answer.strip()
return answer
except OpenAIError as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}:\n{type(e).__name__}=> {str(e)}")
# Path: metrics.py
from collections import Counter
from numpy.linalg import norm
from utils import get_embeddings, get_chat_completion
import evaluate
import streamlit as st
import traceback
import numpy as np
class Metrics:
def __init__(self, question, context, answer, config, strictness=1):
self.question = question
self.context = context
self.answer = answer
self.strictness = strictness
config["model_name"] = "gpt-3.5-turbo"
self.config = config
def rouge_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate Rouge Score."
)
rouge = evaluate.load("rouge")
results = rouge.compute(predictions=self.answer, references=self.context)
rouge1 = np.round(results["rouge1"], 3)
rouge2 = np.round(results["rouge2"], 3)
rougeL = np.round(results["rougeL"], 3)
return rouge1, rouge2, rougeL
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def bleu_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate BLEU Score."
)
bleu = evaluate.load("bleu")
results = bleu.compute(predictions=self.answer, references=self.context)
return np.round(results["bleu"], 3)
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def bert_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate BLEU Score."
)
bertscore = evaluate.load("bertscore")
results = bertscore.compute(
predictions=self.answer,
references=self.context,
lang="en",
model_type="distilbert-base-uncased",
)
return np.round(results["f1"], 3)
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def answer_relevancy(self):
try:
if not self.answer or not self.question:
raise ValueError(
"Please provide both question and answer to generate Answer Relevancy Score."
)
relevancy_prompt = """
Generate question for the given answer.
Here are few examples:
Answer: The first ODI Cricket World Cup was held in 1975, and the West Indies cricket team won the tournament. Clive Lloyd was the captain of the winning West Indies team. They defeated Australia in the final to become the first-ever ODI Cricket World Cup champions.
Question: Which team won the first ODI Cricket World Cup and in which year? Who was the captain of the winning team?
Answer: The first president of the United States of America was George Washington. He became president in the year 1789. Washington served as the country's first president from April 30, 1789, to March 4, 1797.
Question: Who was the first president of the United States of America and in which year did he become president?
Using the answer provided below, generate a question which is relevant to the answer.
"""
answer_relevancy_score = []
for _ in range(self.strictness):
generated_question = get_chat_completion(
self.config, relevancy_prompt, self.answer
)
| question_vec = np.asarray(get_embeddings(self.question.strip()))
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AVAniketh0905/fluidspy
# Path: fluidspylib/fluidspy/numerical/boundary/composite.py
class CompositeBoundary:
children: List[Direction]
def __init__(self, *args) -> None:
self.children = list(args)
def init_apply(self):
for child in self.children:
child.init_apply()
def apply(self):
for child in self.children:
child.apply()
# Path: fluidspylib/fluidspy/numerical/dim/dimension.py
class Dimension(ABC):
"""Abstract class for dimensions."""
initial_conditions: SimulationState
def __init__(self, initial_conditions: SimulationState) -> None:
self.initial_conditions = initial_conditions
@abstractmethod
def create_grid(
self, num_points: Union[int, Tuple[int, int]], base_value: float = 0.0
):
init_state = np.zeros(num_points, dtype=float)
init_state.fill(base_value)
self.initial_conditions.set_state(init_state)
@abstractmethod
def convolution():
pass
# Path: fluidspylib/fluidspy/numerical/material_properties/material.py
class MaterialProperties:
"""Material properties.
Args:
name (str): Material name.
density (float): Material density.(kg/m^3)
specific_heat (float): Material specific heat.(J/kg.K)
prandtl (float): Material Prandtl number.
"""
name: str
density: float
specific_heat: float
prandtl: float
# Path: fluidspylib/fluidspy/numerical/material_properties/material.py
class ThermalProperties(MaterialProperties):
"""Thermal properties.
Args:
name (str): Material name.
density (float): Material density.(kg/m^3)
specific_heat (float): Material specific heat.(J/kg.K)
prandtl (float): Material Prandtl number.
thermal_conductivity (float): Material thermal conductivity.(W/m.K)
thermal_expansion_coefficient (float): Material thermal expansion coefficient.(1/K)
"""
thermal_conductivity: float
thermal_expansion_coefficient: float
# Path: fluidspylib/fluidspy/numerical/state.py
class SimulationState:
state: np.ndarray | NoneType = None
def get_state(self) -> np.ndarray:
return self.state
def set_state(self, value: np.ndarray):
self.state = value
def get_dimension(self):
return self.state.ndim
# Path: fluidspylib/fluidspy/numerical/step.py
class Step:
time: float
vec: Vector
def __init__(
self,
time: float,
vec: Vector = Vector(),
):
"""
Create the time step and the spatial step.
Args:
time (float): The time step.
vec (Vector): The spatial step. Defaults to (0, 0, 0).
"""
self.time = time
self.vec = vec
def __repr__(self) -> str:
return f"({self.time}, {self.vec})"
# Path: fluidspylib/fluidspy/numerical/step.py
class Vector:
x: float
y: float
z: float
def __init__(self, x=inf, y=inf, z=inf) -> None:
"""
Create the spatial step.
"""
self.x = x
self.y = y
self.z = z
def __repr__(self) -> str:
return f"({self.x}, {self.y}, {self.z})"
# Path: fluidspylib/fluidspy/numerical/methods/finite_differential.py
from abc import ABC
from abc import abstractmethod
from typing import List
from ..boundary.composite import CompositeBoundary
from ..dim import Dimension
from ..material_properties import MaterialProperties
from ..material_properties import ThermalProperties
from ..state import SimulationState
from ..step import Step
from ..step import Vector
import numpy as np
class FiniteDifferentialMethod(ABC):
def __init__(
self,
state: SimulationState,
dim: Dimension,
properties: ThermalProperties,
| boundary_conditions: CompositeBoundary, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zorrobyte/esp32-universal-diesel-heater-controller
# Path: states/stateMachine.py
def log(message, level=2):
def handle_state(current_state, switch_value, exhaust_temp, output_temp):
# Path: states/emergencyStop.py
def log(message, level=1):
def turn_off_pumps(timer):
def emergency_stop(reason):
# Path: lib/sensors.py
def log(message, level=1):
def read_temp(analog_value, sensor_type, sensor_beta, sensor_name="output"):
def read_output_temp():
def read_exhaust_temp():
TEMP_HISTORY_LENGTH = 3
R0 = params['R0']
T0 = params['T0']
BETA = sensor_beta
# Path: lib/networking.py
def init_wifi():
def init_mqtt():
def connect_wifi():
def connect_mqtt():
def publish_sensor_values():
def mqtt_callback(topic, msg):
def run_networking():
# Path: lib/fanPID.py
def rpm_interrupt_handler(pin):
def __init__(self, kp, ki, kd):
def calculate(self, setpoint, current_value):
def set_fan_duty_cycle(duty_cycle):
def fan_control_thread():
class PIDController:
# Path: main.py
import machine
import _thread
import hardwareConfig as config
import utime
import webserver
from machine import Timer
from states import stateMachine, emergencyStop
from lib import sensors, networking, fanPID
####################################################################
# WARNING #
####################################################################
# This code is provided "AS IS" without warranty of any kind. #
# Use of this code in any form acknowledges your acceptance of #
# these terms. #
# #
# This code has NOT been tested in real-world scenarios. #
# Improper usage, lack of understanding, or any combination #
# thereof can result in significant property damage, injury, #
# loss of life, or worse. #
# Specifically, this code is related to controlling heating #
# elements and systems, and there's a very real risk that it #
# can BURN YOUR SHIT DOWN. #
# #
# By using, distributing, or even reading this code, you agree #
# to assume all responsibility and risk associated with it. #
# The author(s), contributors, and distributors of this code #
# will NOT be held liable for any damages, injuries, or other #
# consequences you may face as a result of using or attempting #
# to use this code. #
# #
# Always approach such systems with caution. Ensure you understand #
# the code, the systems involved, and the potential risks. #
# If you're unsure, DO NOT use the code. #
# #
# Stay safe and think before you act. #
####################################################################
# Initialize the WDT with a 10-second timeout
wdt = machine.WDT(id=0, timeout=10000) # 10 seconds
def log(message, level=2):
if config.LOG_LEVEL >= level:
print(message)
def get_reset_reason():
reset_reason = machine.reset_cause()
if reset_reason == machine.PWRON_RESET:
print("Reboot was because of Power-On!")
elif reset_reason == machine.WDT_RESET:
print("Reboot was because of WDT!")
return reset_reason
pulse_timer = Timer(0)
last_pulse_time = 0
off_timer = Timer(1)
def turn_off_pump(_):
config.FUEL_PIN.off()
def pulse_fuel_callback(_):
global last_pulse_time
current_time = utime.ticks_ms()
if utime.ticks_diff(current_time, config.heartbeat) > 10000:
config.FUEL_PIN.off()
log("Heartbeat missing, fuel pump turned off.")
elif config.pump_frequency > 0:
period = 1000.0 / config.pump_frequency
if utime.ticks_diff(current_time, last_pulse_time) >= period:
last_pulse_time = current_time
config.FUEL_PIN.on()
off_timer.init(period=int(config.PUMP_ON_TIME * 1000), mode=Timer.ONE_SHOT, callback=turn_off_pump)
else:
config.FUEL_PIN.off()
pulse_timer.init(period=100, mode=Timer.PERIODIC, callback=pulse_fuel_callback)
def emergency_stop_thread():
while True:
wdt.feed()
current_time = utime.ticks_ms() # Use ticks_ms to get the current time in milliseconds
if utime.ticks_diff(current_time, config.heartbeat) > 10000: # Compare in milliseconds (10 seconds = 10000 ms)
emergencyStop.emergency_stop("No heartbeat detected")
utime.sleep(1)
def run_networking_thread():
while True:
networking.run_networking()
utime.sleep(5)
def main():
while True:
config.heartbeat = utime.ticks_ms()
config.output_temp = sensors.read_output_temp()
config.exhaust_temp = sensors.read_exhaust_temp()
current_switch_value = config.SWITCH_PIN.value()
| config.current_state, config.emergency_reason = stateMachine.handle_state( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: suliman-99/django-seeding
# Path: django_seeding/seeders.py
class Seeder():
"""
The `Seeder` class provides a minimal class which may be used
for writing custom seeding implementations.
Required:
seed:
`seed()` as <method>
Additionals:
priority:
`priority` as <class attribute>
or
`get_priority()` as <method>
just_debug:
`just_debug` as <class attribute>
or
`get_just_debug()` as <method>:
"""
def seed(self):
""" Method that fill the datebase as wanted """
raise NotImplementedError('`seed()` must be implemented.')
def _seed(self):
""" Inner method that do validation before calling the public `seed()` method """
id = self._get_id()
# if this seeder is just_debug and the settings state is not debug then dont apply it
if self._get_just_debug() and not settings.DEBUG:
return
# if this seeder is applied before then dont apply it
if AppliedSeeder.objects.filter(id=id).exists():
return
print(f' Seeding {id}...', end='')
# apply the seeder
self.seed()
# store it in the applied seeders table in the database
AppliedSeeder.objects.create(id=id)
GREEN_COLOR = "\033[32m"
WHITE_COLOR = "\033[0m"
print(GREEN_COLOR + " Successfully ^_^ " + WHITE_COLOR)
def get_priority(self):
"""
Method return the `priority` value (smaller will be applied earlier)
if `priority` is passed:
it will be returned
if `priority` is not passed:
float(inf) will be returned
"""
return getattr(self, 'priority', float('inf'))
def _get_priority(self):
""" Innder method to validate the value returned by `get_priority()` method """
priority = self.get_priority()
if not isinstance(priority, float) and not isinstance(priority, int):
raise TypeError('`priority` must be a number')
return priority
def get_just_debug(self):
"""
Method return the `just_debug` value
just_debug=True means this seeder will be applied just when settings.DEBUG=True
if `just_debug` is passed:
it will be returned
if `just_debug` is not passed:
False will be returned
"""
return getattr(self, 'just_debug', False)
def _get_just_debug(self):
""" Innder method to validate the value returned by `get_just_debug()` method """
just_debug = self.get_just_debug()
if not isinstance(just_debug, bool):
raise TypeError('`just_debug` must be a bool value')
return just_debug
def get_id(self):
"""
Method return the `id` value to be stored in the database `AppliedSeeder` table
Note: by this id value we can check if this seeder is applied before or not
it is preferred to not change the id
because after changing thd id the seeder will be considerd as another seeder
then it will be apllied even that the old seeder is applied with the old id value
default value is the name of the class -> str(type(self))
Note:
if you changed the class name
or changed the seeder-class file name
or and file in the path from the root to the class the str(type(self)) will return another value
then the default value of this seeder is changed
then if it doesnt have a constant id the seeder will be applied again
and it may cause errors
so:
give an `id` class attribute to solv this problem
"""
return getattr(self, 'id', str(type(self)))
def _get_id(self):
""" Innder method to validate the value returned by `get_id()` method """
id = self.get_id()
if not isinstance(id, str):
raise TypeError('`id` must be str')
return id
# Path: django_seeding/models.py
class AppliedSeeder(models.Model):
id = models.CharField(max_length=100, primary_key=True)
def __str__(self) -> str:
return self.id
# Path: django_seeding/seeder_registry.py
import sys
import importlib.util
from pathlib import Path
from django.apps import apps
from django.conf import settings
from .seeders import Seeder
from .models import AppliedSeeder
class SeederRegistry:
"""
The `SeederRegistry` class apply registered seeders when the server is run.
seeder registering is doing by:
@SeederRegistry.register as <decorator>
or
SeederRegistry.register(<seeder-class>) as <method>
"""
seeders = []
@classmethod
def register(cls, seeder):
""" Method and decorator to register the seeder-class in the seeders list to be seeded when the server is run """
| if not issubclass(seeder, Seeder): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: cfs-energy/cfspopcon
# Path: cfspopcon/named_options.py
class Algorithms(Enum):
"""Select which top-level algorithm to run."""
predictive_popcon = auto()
two_point_model_fixed_fpow = auto()
two_point_model_fixed_qpart = auto()
two_point_model_fixed_tet = auto()
calc_beta = auto()
calc_core_radiated_power = auto()
calc_fusion_gain = auto()
calc_geometry = auto()
calc_heat_exhaust = auto()
calc_ohmic_power = auto()
calc_peaked_profiles = auto()
calc_plasma_current_from_q_star = auto()
calc_q_star_from_plasma_current = auto()
calc_power_balance_from_tau_e = auto()
calc_zeff_and_dilution_from_impurities = auto()
calc_confinement_transition_threshold_power = auto()
calc_ratio_P_LH = auto()
calc_f_rad_core = auto()
calc_normalised_collisionality = auto()
calc_rho_star = auto()
calc_triple_product = auto()
calc_greenwald_fraction = auto()
calc_current_relaxation_time = auto()
calc_peak_pressure = auto()
calc_average_total_pressure = auto()
calc_bootstrap_fraction = auto()
calc_auxillary_power = auto()
calc_average_ion_temp = auto()
calc_fuel_average_mass_number = auto()
calc_magnetic_field_on_axis = auto()
calc_extrinsic_core_radiator = auto()
require_P_rad_less_than_P_in = auto()
calc_P_SOL = auto()
use_LOC_tau_e_below_threshold = auto()
calc_plasma_stored_energy = auto()
# Path: cfspopcon/named_options.py
class ConfinementScaling(Enum):
r"""Enum of implemented \tau_{E} scalings."""
ITER98y2 = auto()
ITER89P = auto()
ITER89P_ka = auto()
ITERL96Pth = auto()
ITER97L = auto()
IModey2 = auto()
ITPA20_STD5 = auto()
ITPA20_IL = auto()
ITPA20_IL_HighZ = auto()
ITPA_2018_STD5_OLS = auto()
ITPA_2018_STD5_WLS = auto()
ITPA_2018_STD5_GLS = auto()
ITPA_2018_STD5_SEL1_OLS = auto()
ITPA_2018_STD5_SEL1_WLS = auto()
ITPA_2018_STD5_SEL1_GLS = auto()
LOC = auto()
H_DS03 = auto()
# Path: cfspopcon/named_options.py
class Impurity(Enum):
"""Enum of possible impurity elements.
The enum value represents the element's atomic number (Z).
"""
Helium = 2
Lithium = 3
Beryllium = 4
Carbon = 6
Nitrogen = 7
Oxygen = 8
Neon = 10
Argon = 18
Krypton = 36
Xenon = 54
Tungsten = 74
# Path: cfspopcon/named_options.py
class LambdaQScaling(Enum):
"""Options for heat flux decay length scaling."""
Brunner = auto()
EichRegression14 = auto()
EichRegression15 = auto()
# Path: cfspopcon/named_options.py
class MomentumLossFunction(Enum):
"""Select which SOL momentum loss function to use."""
KotovReiter = auto()
Sang = auto()
Jarvinen = auto()
Moulton = auto()
PerezH = auto()
PerezL = auto()
# Path: cfspopcon/named_options.py
class ProfileForm(Enum):
"""Methods to calculate nT profiles."""
analytic = auto()
prf = auto()
# Path: cfspopcon/named_options.py
class RadiationMethod(Enum):
"""Methods to calculate radiation losses."""
Inherent = "Bremsstrahlung and synchrotron radiation only"
PostJensen = "Impurity radiation, using a coronal equilibrium model from Post & Jensen 1977"
MavrinCoronal = "Impurity radiation, using a coronal equilibrium model from Mavrin 2018"
MavrinNoncoronal = "Impurity radiation, using a non-coronal model from Mavrin 2017"
Radas = "Impurity line and bremsstrahlung radiation, using coronal Lz curves from Radas"
# Path: cfspopcon/named_options.py
class ReactionType(Enum):
"""Supported Fusion Fuel Reaction Types."""
DT = "Deuterium-Tritium"
DD = "Deuterium-Deuterium"
DHe3 = "Deuterium-Helium3"
pB11 = "Proton-Boron11"
# Path: cfspopcon/helpers.py
from typing import Any, Union
from .named_options import (
Algorithms,
ConfinementScaling,
Impurity,
LambdaQScaling,
MomentumLossFunction,
ProfileForm,
RadiationMethod,
ReactionType,
)
import xarray as xr
"""Constructors and helper functions."""
def convert_named_options(key: str, val: Any) -> Any: # noqa: PLR0911, PLR0912
"""Given a 'key' matching a named_option, return the corresponding Enum value."""
if key == "algorithms":
return Algorithms[val]
elif key == "energy_confinement_scaling":
return ConfinementScaling[val]
elif key == "profile_form":
| return ProfileForm[val] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yifei-he/GOAT
# Path: ot_util.py
def ot_ablation(size, mode):
ns, nt = size, size
plan = np.zeros((ns, nt))
ran = np.arange(ns*nt)
np.random.shuffle(ran)
idx = ran[:size]
for i in idx:
row = i // nt
col = i-i//nt * nt
if mode == "random":
plan[row, col] = np.random.uniform()
elif mode == "uniform":
plan[row, col] = 1
plan /= np.sum(plan, 1, keepdims=True)
plan[~ np.isfinite(plan)] = 0
return plan
# Path: ot_util.py
def generate_domains(n_inter, dataset_s, dataset_t, plan=None, entry_cutoff=0, conf=0):
print("------------Generate Intermediate domains----------")
all_domains = []
xs, xt = dataset_s.data, dataset_t.data
ys = dataset_s.targets
if plan is None:
if len(xs.shape) > 2:
xs_flat, xt_flat = nn.Flatten()(xs), nn.Flatten()(xt)
plan = get_OT_plan(xs_flat, xt_flat, solver='emd', entry_cutoff=entry_cutoff)
else:
plan = get_OT_plan(xs, xt, solver='emd', entry_cutoff=entry_cutoff)
logits_t = get_transported_labels(plan, ys, logit=True)
yt_hat, conf_idx = get_conf_idx(logits_t, confidence_q=conf)
xt = xt[conf_idx]
plan = plan[:, conf_idx]
yt_hat = yt_hat[conf_idx]
print(f"Remaining data after confidence filter: {len(conf_idx)}")
for i in range(1, n_inter+1):
x, weights = pushforward(xs, xt, plan, i / (n_inter+1))
if isinstance(x, np.ndarray):
all_domains.append(DomainDataset(torch.from_numpy(x).float(), weights))
else:
all_domains.append(DomainDataset(x, weights))
all_domains.append(dataset_t)
print(f"Total data for each intermediate domain: {len(x)}")
return all_domains
# Path: experiments.py
import torch
import torch.optim as optim
import copy
import argparse
import random
import torch.backends.cudnn as cudnn
import time
from model import *
from train_model import *
from util import *
from ot_util import ot_ablation
from da_algo import *
from ot_util import generate_domains
from dataset import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_source_model(args, trainset, testset, n_class, mode, encoder=None, epochs=50, verbose=True):
print("Start training source model")
model = Classifier(encoder, MLP(mode=mode, n_class=n_class, hidden=1024)).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
for epoch in range(1, epochs+1):
train(epoch, trainloader, model, optimizer, verbose=verbose)
if epoch % 5 == 0:
test(testloader, model, verbose=verbose)
return model
def run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=10):
# get the performance of direct adaptation from the source to target, st involves self-training on target
direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=epochs)
# get the performance of GST from the source to target, st involves self-training on target
direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=epochs)
# encode the source and target domains
e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset)
# encode the intermediate ground-truth domains
intersets = all_sets[:-1]
encoded_intersets = [e_src_trainset]
for i in intersets:
encoded_intersets.append(get_encoded_dataset(source_model.encoder, i))
encoded_intersets.append(e_tgt_trainset)
# generate intermediate domains
generated_acc = 0
if generated_domains > 0:
all_domains = []
for i in range(len(encoded_intersets)-1):
| all_domains += generate_domains(generated_domains, encoded_intersets[i], encoded_intersets[i+1]) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ansible/django-ansible-base
# Path: ansible_base/models/authenticator_map.py
class AuthenticatorMap(NamedCommonModel):
class Meta:
app_label = 'ansible_base'
# If the map type is a team then we must have an org/team
constraints = [
models.CheckConstraint(
name="%(app_label)s_%(class)s_require_org_team_if_team_map",
check=(~models.Q(map_type='team') | models.Q(team__isnull=False) & models.Q(organization__isnull=False)),
),
models.CheckConstraint(
name="%(app_label)s_%(class)s_require_org_if_org_map",
check=(~models.Q(map_type='organization') | models.Q(organization__isnull=False)),
),
]
unique_together = ['name', 'authenticator']
authenticator = models.ForeignKey(
Authenticator,
null=False,
on_delete=models.CASCADE,
help_text="The authenticator this mapping belongs to",
)
revoke = models.BooleanField(
null=False,
default=False,
help_text="If a user does not meet this rule should we revoke the permission",
)
map_type = models.CharField(
max_length=17,
null=False,
default="team",
choices=[
('team', 'team'),
('is_superuser', 'is_superuser'),
('is_system_auditor', 'is_system_auditor'),
('allow', 'allow'),
('organization', 'organization'),
],
help_text='What does the map work on, a team, a user flag or is this an allow rule',
)
team = models.CharField(
max_length=512,
null=True,
default=None,
help_text='A team name this rule works on',
)
organization = models.CharField(
max_length=512,
null=True,
default=None,
help_text='An organization name this rule works on',
)
triggers = models.JSONField(
null=False,
default=dict,
help_text="Trigger information for this rule",
)
order = models.PositiveIntegerField(
null=False,
default=0,
help_text=(
"The order in which this rule should be processed, smaller numbers are of higher precedence. "
"Items with the same order will be executed in random order"
),
)
# Path: ansible_base/serializers/common.py
class CommonModelSerializer(serializers.ModelSerializer):
show_capabilities = ['edit', 'delete']
url = serializers.SerializerMethodField()
related = serializers.SerializerMethodField('_get_related')
summary_fields = serializers.SerializerMethodField('_get_summary_fields')
class Meta:
fields = ['id', 'url', 'created_on', 'created_by', 'modified_on', 'modified_by', 'related', 'summary_fields']
def __init__(self, instance=None, data=empty, **kwargs):
# pre-populate the form with the defaults from the model
model = getattr(self.Meta, 'model', None)
if model:
extra_kwargs = getattr(self.Meta, 'extra_kwargs', {})
for field in model._meta.concrete_fields:
if field.name not in extra_kwargs:
extra_kwargs[field.name] = {}
if not extra_kwargs[field.name].get('initial', None):
if field.default and field.default is not NOT_PROVIDED:
extra_kwargs[field.name]['initial'] = field.default
setattr(self.Meta, 'extra_kwargs', extra_kwargs)
super().__init__(instance, data, **kwargs)
def get_url(self, obj):
if self.reverse_url_name:
return reverse_lazy(self.reverse_url_name, kwargs={'pk': obj.pk})
return ''
def _get_related(self, obj):
if obj is None:
return {}
if not hasattr(obj, 'related_fields'):
logger.warning(f"Object {obj.__class__} has no related_fields method")
return {}
return obj.related_fields(self.context.get('request'))
def _get_summary_fields(self, obj):
if obj is None:
return {}
if not hasattr(obj, 'get_summary_fields'):
logger.warning(f"Object {obj.__class__} has no get_summary_fields method")
return {}
return obj.get_summary_fields()
def to_representation(self, obj):
ret = super().to_representation(obj)
for key in obj.encrypted_fields:
if key in ret:
ret[key] = ENCRYPTED_STRING
return ret
def update(self, instance, validated_data):
# We don't want the $encrypted$ fields going back to the model
for key in self.Meta.model.encrypted_fields:
new_field = validated_data.get(key, None)
if new_field and new_field == ENCRYPTED_STRING:
validated_data.pop(key, None)
return super().update(instance, validated_data)
# Path: ansible_base/utils/encryption.py
ENCRYPTED_STRING = '$encrypted$'
# Path: test_app/models.py
class EncryptionModel(NamedCommonModel):
class Meta:
app_label = "test_app"
encrypted_fields = ['testing1', 'testing2']
testing1 = models.CharField(max_length=1, null=True, default='a')
testing2 = models.CharField(max_length=1, null=True, default='b')
# Path: test_app/serializers.py
class EncryptionTestSerializer(NamedCommonModelSerializer):
reverse_url_name = None
class Meta:
model = EncryptionModel
fields = NamedCommonModelSerializer.Meta.fields + [x.name for x in EncryptionModel._meta.concrete_fields]
# Path: ansible_base/tests/unit/serializers/test_common.py
import pytest
from ansible_base.models import AuthenticatorMap
from ansible_base.serializers.common import CommonModelSerializer
from ansible_base.utils.encryption import ENCRYPTED_STRING
from test_app.models import EncryptionModel
from test_app.serializers import EncryptionTestSerializer
@pytest.mark.django_db
def test_representation_of_encrypted_fields():
model = EncryptionModel.objects.create()
| serializer = EncryptionTestSerializer() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zhudotexe/kani-vision
# Path: kani/ext/vision/parts.py
class ImagePart(MessagePart, abc.ABC):
"""Base class for all image message parts.
Generally, you shouldn't construct this directly - instead, use one of the classmethods to initialize the image from
a file path, binary, or Pillow image.
"""
model_config = ConfigDict(ignored_types=(functools.cached_property,))
# constructors
@staticmethod
def from_path(fp: PathLike):
"""Load an image from a path on the local filesystem."""
return FileImagePart(path=fp)
@staticmethod
def from_bytes(data: bytes):
"""Load an image from binary data in memory."""
return BytesImagePart(data=data)
@staticmethod
def from_image(image: Image.Image):
"""Create an image part from an existing :class:`PIL.Image.Image`."""
return PillowImagePart(pil_image=image)
@classmethod
async def from_url(cls, url: str, remote: bool = True):
"""Create an image part from a URL.
If *remote* is True, this will not download the image - it will be up to the engine to do so!
.. attention::
Note that this classmethod is *asynchronous*, unlike the other classmethods!
This is because we need to check the image headers and metadata before returning a valid image part.
"""
if not remote:
io = BytesIO()
await download_image(url, io)
return BytesImagePart(data=io.getvalue())
size, mime = await image_metadata_from_url(url)
return RemoteURLImagePart(url=url, size_=size, mime_=mime)
# interface
@property
def image(self) -> Image.Image:
"""Get a :class:`PIL.Image.Image` representing the image."""
raise NotImplementedError
@property
def bytes(self) -> bytes:
"""The binary image data."""
io = BytesIO()
self.image.save(io, format="PNG")
return io.getvalue()
@property
def b64(self) -> str:
"""The binary image data encoded in a base64 string.
Note that this is *not* a web-suitable ``data:image/...`` string; just the raw binary of the image. Use
:attr:`b64_uri` for a web-suitable string.
"""
return base64.b64encode(self.bytes).decode()
@property
def b64_uri(self) -> str:
"""Get the binary image data encoded in a web-suitable base64 string."""
return f"data:{self.mime};base64,{self.b64}"
# metadata
@property
def size(self) -> tuple[int, int]:
"""Get the size of the image, in pixels."""
return self.image.size
@property
def mime(self) -> str:
"""Get the MIME filetype of the image."""
img_format = self.image.format
return Image.MIME.get(img_format, f"image/{img_format.lower()}")
# Path: kani/ext/vision/parts.py
class RemoteURLImagePart(ImagePart):
"""A reference to a remote image stored at the given URL.
Use :meth:`.ImagePart.from_url` to construct.
"""
url: str
size_: tuple[int, int]
mime_: str
@property
def image(self):
raise RemoteImageError(
"This engine does not support remote images. Use `await ImagePart.from_url(url, remote=False)` to download"
" the image before using it in this engine."
)
@property
def size(self):
return self.size_
@property
def mime(self):
return self.mime_
# Path: kani/ext/vision/engines/openai/models.py
from typing import Annotated, Literal, Union
from pydantic import Field
from kani.engines.openai.models import OpenAIChatMessage
from kani.models import BaseModel, ChatMessage, ChatRole
from ...parts import ImagePart, RemoteURLImagePart
# note: `type` does not have default since we use `.model_dump(..., exclude_defaults=True)`
class OpenAIText(BaseModel):
type: Literal["text"]
text: str
@classmethod
def from_text(cls, data: str):
return cls(type="text", text=data)
class OpenAIImage(BaseModel):
type: Literal["image_url"]
image_url: str
detail: Literal["high"] | Literal["low"] | None = None
@classmethod
def from_imagepart(cls, part: ImagePart):
| if isinstance(part, RemoteURLImagePart): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: line/Skeleton-Temporal-Action-Localization
# Path: evaluation/classificationMAP.py
def getClassificationMAP(confidence, labels):
""" confidence and labels are of dimension n_samples x n_label """
AP = []
for i in range(np.shape(labels)[1]):
AP.append(getAP(confidence[:, i], labels[:, i]))
return 100 * sum(AP) / len(AP)
# Path: evaluation/detectionMAP.py
def getSingleStreamDetectionMAP(
vid_preds, frm_preds, vid_lens, annotation_path, args, multi=False, factor=1.0
):
iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
dmap_list = []
seg = getActLoc(
vid_preds,
frm_preds,
vid_lens,
np.arange(args.start_threshold, args.end_threshold, args.threshold_interval),
annotation_path,
args,
multi=multi,
)
# print (len(seg))
for iou in iou_list:
print("Testing for IoU %f" % iou)
dmap_list.append(
getLocMAP(seg, iou, annotation_path, args, multi=multi, factor=factor)
)
return dmap_list, iou_list
# Path: evaluation/detectionMAP.py
def getTwoStreamDetectionMAP(
rgb_vid_preds,
flow_vid_preds,
rgb_frm_preds,
flow_frm_preds,
vid_lens,
annotation_path,
args,
):
iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
dmap_list = []
rgb_seg = getActLoc(
rgb_vid_preds,
rgb_frm_preds * 0.1,
vid_lens,
np.arange(args.start_threshold, args.end_threshold, args.threshold_interval)
* 0.1,
annotation_path,
args,
)
flow_seg = getActLoc(
flow_vid_preds,
flow_frm_preds,
vid_lens,
np.arange(args.start_threshold, args.end_threshold, args.threshold_interval),
annotation_path,
args,
)
seg = IntergrateSegs(rgb_seg, flow_seg, 0.9, args)
for iou in iou_list:
print("Testing for IoU %f" % iou)
dmap_list.append(getLocMAP(seg, iou, annotation_path, args))
return dmap_list, iou_list
# Path: evaluation/utils.py
def write_results_to_eval_file(args, dmap, itr1, itr2):
file_folder = "./ckpt/" + args.dataset_name + "/eval/"
file_name = args.dataset_name + "-results.log"
fid = open(file_folder + file_name, "a+")
string_to_write = str(itr1)
string_to_write += " " + str(itr2)
for item in dmap:
string_to_write += " " + "%.2f" % item
fid.write(string_to_write + "\n")
fid.close()
# Path: evaluation/utils.py
def write_results_to_file(args, dmap, cmap, itr):
file_folder = "./ckpt/" + args.dataset_name + "/" + str(args.model_id) + "/"
file_name = args.dataset_name + "-results.log"
fid = open(file_folder + file_name, "a+")
string_to_write = str(itr)
for item in dmap:
string_to_write += " " + "%.2f" % item
string_to_write += " " + "%.2f" % cmap
fid.write(string_to_write + "\n")
fid.close()
# Path: evaluation/eval.py
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from .classificationMAP import getClassificationMAP as cmAP
from .detectionMAP import getSingleStreamDetectionMAP as dsmAP
from .detectionMAP import getTwoStreamDetectionMAP as dtmAP
from .utils import write_results_to_eval_file, write_results_to_file
def ss_eval(epoch, dataloader, args, logger, model, device):
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
for num, sample in enumerate(dataloader):
if (num + 1) % 100 == 0:
print("Testing test data point %d of %d" % (num + 1, len(dataloader)))
features = sample["data"].numpy()
label = sample["labels"].numpy()
vid_len = sample["vid_len"].numpy()
features = torch.from_numpy(features).float().to(device)
with torch.no_grad():
_, vid_pred, _, frm_scr = model(Variable(features))
frm_pred = F.softmax(frm_scr, -1)
vid_pred = np.squeeze(vid_pred.cpu().data.numpy(), axis=0)
frm_pred = np.squeeze(frm_pred.cpu().data.numpy(), axis=0)
label = np.squeeze(label, axis=0)
vid_preds.append(vid_pred)
frm_preds.append(frm_pred)
vid_lens.append(vid_len)
labels.append(label)
vid_preds = np.array(vid_preds)
frm_preds = np.array(frm_preds)
vid_lens = np.array(vid_lens)
labels = np.array(labels)
cmap = cmAP(vid_preds, labels)
dmap, iou = dsmAP(
vid_preds, frm_preds, vid_lens, dataloader.dataset.path_to_annotations, args
)
print("Classification map %f" % cmap)
for item in list(zip(iou, dmap)):
print("Detection map @ %f = %f" % (item[0], item[1]))
logger.log_value("Test Classification mAP", cmap, epoch)
for item in list(zip(dmap, iou)):
logger.log_value("Test Detection1 mAP @ IoU = " + str(item[1]), item[0], epoch)
| write_results_to_file(args, dmap, cmap, epoch) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: n-thumann/xbox-cloud-statistics
# Path: backend/xbox_cloud_statistics/models.py
class Game(Model):
id: str
title: str
image_url: str
subscriptions: Subscription
def to_dict(self) -> dict:
return {"id": self.id, "title": self.title, "image_url": self.image_url}
def __lt__(self, other) -> bool:
if not isinstance(other, Game):
raise TypeError
return self.id < other.id
# Path: backend/xbox_cloud_statistics/models.py
class Measurement(Model):
server_time: datetime
wait_time: int
def to_dict(self) -> dict:
return {int(self.server_time.timestamp()): self.wait_time}
# Path: backend/xbox_cloud_statistics/models.py
class Results(Model):
_games: dict[Game, GameResult] = field(
default_factory=lambda: defaultdict(GameResult)
)
def __getitem__(self, game: Game) -> GameResult:
return self._games[game]
def __iter__(self) -> Iterator[tuple[Game, GameResult]]:
return iter(sorted(self._games.items()))
def to_dict(self) -> dict:
return {game.id: game_result for game, game_result in self}
# Path: backend/xbox_cloud_statistics/models.py
class Subscription(Flag):
F2P = auto()
GPU = auto()
@classmethod
def from_string(cls, value: str):
return cls._member_map_.get(value)
# Path: backend/xbox_cloud_statistics/main.py
import asyncio
import itertools
import httpx
from pathlib import Path
from xbox_cloud_statistics.client import XBoxCloudClient
from xbox_cloud_statistics.config import Config
from xbox_cloud_statistics.io.cli import CLI
from xbox_cloud_statistics.io.json import JSON
from .models import (
Game,
Measurement,
Results,
Subscription,
)
def run():
asyncio.run(main())
async def main():
config = Config()
results = Results()
async with httpx.AsyncClient(http2=True) as http_client:
client = XBoxCloudClient(http_client, config.client_id, config.client_secret)
if config.f2p_token:
await run_measurements(
client,
Subscription.F2P,
config.f2p_token,
config.f2p_games,
results,
)
if config.gpu_token:
await run_measurements(
client,
Subscription.GPU,
config.gpu_token,
config.gpu_games,
results,
)
CLI.handle(results)
JSON.handle(results, Path("./results"))
async def run_measurements(
client: XBoxCloudClient,
subscription: Subscription,
token: str,
games: list[Game],
results: Results,
):
await client.login(subscription, token)
games_regions = list(itertools.product(games, client.regions))
coroutines = [client.measure(region, game) for game, region in games_regions]
| times: list[Measurement | Exception] = await asyncio.gather( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: albu-org/aiotp
# Path: aiotp/core/otp.py
class OTP(AbstractOTP):
def __init__(
self,
secret: str,
digit: int = 5,
algorithm: algorithms = 'sha1'
) -> None:
assert 0 < digit < 11
assert algorithm.lower() in ('sha1', 'sha256', 'sha512')
self.digit = digit
self.secret = secret
self.algorithm = algorithm
async def _generate(self, integer: int) -> str:
if integer < 0:
raise ValueError('input must be positive integer')
int2bytes = struct.pack('>q', integer)
b_secret = base64.b32decode(self.secret + '=' * ((8 - len(self.secret)) % 8), casefold=True)
hash_hmac = hmac.new(b_secret, int2bytes, self.algorithm).digest()
offset = hash_hmac[-1] & 0xF
code_bytes = hash_hmac[offset:offset + 4]
code = str(struct.unpack('>l', code_bytes)[0] & 0X7FFFFFFF)
return code[-self.digit:].zfill(self.digit)
# Path: aiotp/utils/utils.py
async def conversion(date_time: datetime.datetime, interval: int) -> int:
if date_time.tzinfo:
return int(calendar.timegm(date_time.utctimetuple()) / interval)
else:
return int(time.mktime(date_time.timetuple()) / interval)
# Path: aiotp/typing.py
# Path: aiotp/abstracts/abstracts.py
class AbstractTOTP(ABC):
"""AbstractBase"""
@abstractmethod
async def create(self, dt: datetime) -> str:
"""generate the TOTP code"""
@abstractmethod
async def verify(self, code: str, dt: datetime) -> bool:
"""verify the TOTP code"""
@abstractmethod
async def uri(self, name: str, issuer: Optional[str], image: Optional[str]) -> str:
"""generate the uri"""
# Path: aiotp/totp/totp.py
import hmac
import datetime
import unicodedata
from typing import Optional
from urllib.parse import quote, urlencode, urlparse
from ..core import OTP
from ..utils import conversion
from ..typing import algorithms
from ..abstracts import AbstractTOTP
class TOTP(AbstractTOTP, OTP):
def __init__(
self,
secret: str,
digits: int = 5,
interval: int = 60,
algorithm: algorithms = 'sha1',
) -> None:
self.interval = interval
super().__init__(secret, digits, algorithm)
async def __aenter__(self) -> 'TOTP':
return self
async def __aexit__(self, *args, **kwargs) -> None:
...
async def create(self, dt: Optional[datetime.datetime] = None) -> str:
if not dt:
dt = datetime.datetime.now()
| return await self._generate(await conversion(dt, self.interval)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: brandonrobertz/reason-act-sqlite-py
# Path: actions.py
DB_PATH = "example.db"
# Path: actions.py
def load_db(path):
assert os.path.exists(path), f"Database doesn't exist: {path}"
db = sqlite_utils.Database(path)
return db
# Path: actions.py
def tables(db):
return [
name
for name in db.table_names()
# game stats confuses the model
if (
"_fts" not in name
and name not in IGNORED_TABLES
and not name.endswith("_history")
)
]
# Path: actions.py
def schema(db, table_name):
table_names = tables(db)
if table_name not in table_names:
return f"Error: Invalid table. Valid tables are: {table_names}"
return re.sub('\s+', ' ', db[table_name].schema)
# Path: actions.py
def help(db, *args):
if not args:
return "Error: The help action requires at least one argument"
table_name = args[0]
column = None
if len(args) == 2:
column = args[1]
if table_name not in DATA_HELP:
available_tables = tables(db)
return f"Error: The table {table_name} doesn't exist. Valid tables: {available_tables}"
if column not in DATA_HELP[table_name]:
available_columns = [
c.name
for c in db[table_name].columns
if c.name not in IGNORED_COLUMNS
]
return f"Error: The column {column} isn't in the {table_name} table. Valid columns: {available_columns}"
help_text = DATA_HELP[table_name][column]
# table help requested
if column is None:
return help_text
# column help requested, add common values
analysis = db[table_name].analyze_column(column, common_limit=2)
common_values = ", ".join([f"{value}" for value, count in analysis.most_common])
return f"{help_text} the top two values are: {common_values}"
# Path: actions.py
def sql_query(db, query):
if query.lower().startswith("select *"):
return "Error: Select some specific columns, not *"
try:
results = list(db.query(query))
except sqlite3.OperationalError as e:
return f"Your query has an error: {e}"
return clean_truncate(results, n=5)
# Path: llm_sql_queries.py
import json
import os
import re
import sys
import sqlite3
from llama_cpp import Llama
from actions import (
DB_PATH, load_db,
tables, schema, help, sql_query
)
try:
except ModuleNotFoundError:
print("llama_cpp not installed, continuing without")
# Larger context sizes will reduce quality, but some models
# support large contexts better than others.
#CONTEXT_SIZE=2048
CONTEXT_SIZE=2048*2
# how many tokens to allow the model to output in a sigle go w/o stopping
MAX_TOKENS=400
# Utils n stuff
def load_model(model_path, n_gpu_layers=0, n_threads=os.cpu_count() - 1,
n_ctx=CONTEXT_SIZE, temp=None, top_p=None):
# for LLaMA2 70B models add kwarg: n_gqa=8 (NOTE: not required for GGUF models)
print("Loading model", model_path)
print("CTX:", n_ctx, "GPU layers:", n_gpu_layers, "CPU threads:", n_threads)
print("Temperature:", temp, "Top-p Sampling:", top_p)
kwargs = dict(
model_path=model_path,
n_ctx=n_ctx,
n_gpu_layers=n_gpu_layers,
n_threads=n_threads,
verbose=False
)
if temp is not None:
kwargs["temp"] = temp
if top_p is not None:
kwargs["top_p"] = top_p
llm = Llama(**kwargs)
return llm
def execute(model_path, outfile=None, debug=True, return_dict=None,
prompt=None, n_gpu_layers=0, temp=None, top_p=None):
llm = load_model(model_path, n_gpu_layers=n_gpu_layers, temp=temp,
top_p=top_p)
| db = load_db(DB_PATH) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sehyun03/MulActSeg
# Path: dataloader/region_cityscapes_tensor.py
class RegionCityscapesTensor(RegionCityscapes):
def __init__(self, args, root, datalist, split='train', transform=None, region_dict="dataloader/init_data/cityscapes/train.dict"):
super().__init__(args, root, datalist, split, transform, False, region_dict, True, False)
self.kernel = np.ones((args.trim_kernel_size, args.trim_kernel_size), np.uint8)
def __getitem__(self, index):
img_fname, lbl_fname, spx_fname = self.im_idx[index]
'''Load image, label, and superpixel'''
image = Image.open(img_fname).convert('RGB')
target = Image.open(lbl_fname)
superpixel = self.open_spx(spx_fname)
image, lbls = self.transform(image, [target, superpixel])
target, superpixel = lbls
target = self.encode_target(target)
''' superpixel tensor generation '''
superpixel_cls = torch.zeros((self.args.nseg, self.args.num_classes + 1), dtype=torch.uint8)
superpixel_size = torch.ones((self.args.nseg, ), dtype=torch.int) * -1
'''GT masking (mimic region-based annotation)'''
target = target.reshape(-1)
preserving_labels = self.suppix[spx_fname]
### trim query boundary
if self.args.trim_multihot_boundary:
bdry = find_boundaries(superpixel, mode='thick')
bdry = binary_dilation(bdry, self.kernel)
bdry = torch.from_numpy(bdry)
superpixel_trim = superpixel.clone()
superpixel_trim = torch.masked_fill(superpixel_trim, bdry, self.args.nseg)
superpixel_trim = superpixel_trim.reshape(-1)
else:
pass
superpixel = superpixel.reshape(-1)
''' Multi-hot label assignment '''
for p in preserving_labels:
if self.args.trim_multihot_boundary:
sp_mask = (superpixel_trim == p)
sp_mask = sp_mask if torch.any(sp_mask) else (superpixel == p) # boundary 때문에 소실되는 걸 방지
else:
sp_mask = (superpixel == p)
# Image.fromarray(sp_mask.reshape(1024,2048).numpy()).save("vis/trim/new_{}_trim_{}x{}_mask.png".format(p, self.args.trim_kernel_size, self.args.trim_kernel_size))
u, c = np.unique(target[sp_mask], return_counts=True) ### superpixel 내부에 class 구성 파악
isignore = 255 in u
if isignore and len(u) == 1:
allignore = True
else:
allignore = False
npx = sp_mask.sum()
if not allignore:
u_valid = u[u != 255]
c_valid = c[u != 255]
c_order = c_valid.argsort()[::-1]
cls = u_valid[c_order].tolist()
cpx = c_valid[c_order].tolist()
else:
cls = []
cpx = []
if isignore:
cls.append(-1) ### last dimension of superpixel_cls is assigned to ignore label
else:
pass
superpixel_cls[p, cls] = 1
superpixel_size[p] = npx
sample = {'superpixel_info': (superpixel_cls, superpixel_size), 'fname': self.im_idx[index]}
return sample
# Path: dataloader/utils.py
class DataProvider():
def __init__(self, dataset, batch_size, num_workers, drop_last, shuffle,
pin_memory):
# dataset
self.dataset = dataset
self.iteration = 0
self.epoch = 0
# dataloader parameters
self.batch_size = batch_size
self.num_workers = num_workers
self.drop_last = drop_last
self.shuffle = shuffle
self.pin_memory = pin_memory
self.dataloader = \
DataLoader(self.dataset, batch_size=self.batch_size, collate_fn=collate_fn,
shuffle=self.shuffle, num_workers=self.num_workers, drop_last=self.drop_last,
pin_memory=self.pin_memory)
self.dataiter = iter(self.dataloader)
def __len__(self):
return len(self.dataloader)
def __next__(self):
try:
batch = self.dataiter.next()
self.iteration += 1
return batch
except StopIteration:
self.epoch += 1
self.dataiter = iter(self.dataloader)
batch = self.dataiter.next()
self.iteration += 1
return batch
# Path: tools/label_assignment_tensor.py
import os
import sys
import argparse
import numpy as np
import dataloader.ext_transforms as et
from tqdm import tqdm
from dataloader.region_cityscapes_tensor import RegionCityscapesTensor
from dataloader.utils import DataProvider
sys.path.append(os.path.abspath('.'))
def get_parser():
# Training configurations
parser = argparse.ArgumentParser(description='')
parser.add_argument('--nseg', type=int, default=2048, help='# superpixel component for slic')
parser.add_argument('--save_data_dir', help='superpixel directory root')
parser.add_argument('--num_worker', type=int, default=8, help='number of classes in dataset')
parser.add_argument('--ignore_size', type=int, default=0, help='(or_lbeling) ignore class region smaller than this')
parser.add_argument('--mark_topk', type=int, default=-1, help='(or_lbeling) ignore classes with the region size under than kth order')
parser.add_argument('--num_classes', type=int, default=19, help='number of classes in dataset')
parser.add_argument('--trim_kernel_size', type=int, default=3)
parser.add_argument('--trim_multihot_boundary', action='store_true', default=False)
parser.add_argument('--prob_dominant', action='store_true', default=False)
parser.add_argument('--spx_method', default='seed')
parser.add_argument('--trg_data_dir', default='./data/Cityscapes')
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
args.trg_datalist = 'dataloader/init_data/cityscapes/train_{}{}.txt'.format(args.spx_method, args.nseg)
args.region_dict = 'dataloader/init_data/cityscapes/train_{}{}.dict'.format(args.spx_method, args.nseg)
args.known_ignore = False
print(args)
identity_transform = et.ExtCompose([et.ExtToTensor(dtype_list=['int','int'])])
### load superpixel & max-frequent pooled target
| region_dataset = RegionCityscapesTensor(args, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: upiterbarg/hihack
# Path: models/transformer_lstm.py
def generate_square_subsequent_mask(sz: int, device: str = "cpu") -> torch.Tensor:
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = (
mask.float()
.masked_fill(mask == 0, float("-inf"))
.masked_fill(mask == 1, float(0.0))
).to(device=device)
return mask
# Path: models/transformer_lstm.py
class PositionalEncoding(nn.Module):
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
pe = torch.zeros(max_len, 1, d_model)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x: Tensor, shape [seq_len, batch_size, embedding_dim]
"""
x = x + self.pe[:x.size(0)]
return self.dropout(x)
# Path: models/flat_transformer.py
import json
import numpy as np
import os
import pathlib
import pdb
import sys
import torch
from nle import nethack
from nle.nethack.actions import ACTIONS as A
from torch import nn
from torch.nn import functional as F
from .transformer_lstm import (
generate_square_subsequent_mask,
PositionalEncoding
)
from chaotic_dwarf import (
TopLineEncoder,
BottomLinesEncoder,
ScreenEncoder,
conv_outdim
)
base_path = pathlib.Path().resolve()
sys.path.insert(0, os.path.join(base_path, '..', 'dungeonsdata-neurips2022/experiment_code/hackrl/models'))
class FlatTransformer(nn.Module):
def __init__(self, shape, action_space, flags, device):
super(FlatTransformer, self).__init__()
self.flags = flags
self.num_actions = len(action_space)
self.use_prev_action = flags.use_prev_action
self.topline_encoder = TopLineEncoder()
self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())
pixel_size = flags.pixel_size
if flags.crop_dim == 0:
screen_shape = (24 * pixel_size, 80 * pixel_size)
else:
screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)
self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))
self.prev_actions_dim = 128 if self.use_prev_action else 0
self.h_dim = sum(
[
self.topline_encoder.hidden_dim,
self.bottomline_encoder.hidden_dim,
self.screen_encoder.hidden_dim,
self.prev_actions_dim,
]
)
self.num_attention_heads = flags.num_attention_heads
self.num_transformer_encoder_layers = flags.num_transformer_layers
core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads)
self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers)
self.positional_encoder = PositionalEncoding(self.h_dim)
self.policy_hidden_dim = 1024
self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim),
nn.ELU(),
nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),
nn.ELU(),
nn.Linear(self.policy_hidden_dim, self.num_actions)
)
self.baseline = nn.Linear(self.h_dim, 1)
self.version = 0
self.inference_unroll_length = 1
def initial_state(self, batch_size=1):
return (
torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length),
torch.rand(self.inference_unroll_length, batch_size, self.h_dim)
)
def forward(self, inputs, core_state=None):
T, B, C, H, W = inputs["screen_image"].shape
topline = inputs["tty_chars"][..., 0, :]
bottom_line = inputs["tty_chars"][..., -2:, :]
st = [
self.topline_encoder(
topline.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.bottomline_encoder(
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.screen_encoder(
inputs["screen_image"]
.float(memory_format=torch.contiguous_format)
.view(T * B, C, H, W)
),
]
if self.use_prev_action:
st.append(torch.nn.functional.one_hot(inputs["prev_action"], self.prev_actions_dim).view(T * B, -1))
st = torch.cat(st, dim=1)
core_input = st.reshape(T, B, -1)
notdone = (~inputs["done"]).float()
if not self.training:
prev_mask, prev_encodings = core_state
prev_mask = prev_mask.squeeze(0)
core_input = torch.cat([prev_encodings[1:], core_input], axis=0)
core_mask = torch.stack(
[torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)]
)
core_mask[:, -1, -1] = 1
core_state = (core_mask.detach().clone().unsqueeze(0),
core_input.detach().clone()
)
for i in range(B):
core_mask[i].fill_diagonal_(1)
core_mask = (core_mask.float().masked_fill(core_mask == 0, float("-inf")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device)
core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)
T = core_input.shape[0]
else:
| core_mask = generate_square_subsequent_mask(T, core_input.device) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kulkansecurity/gitverify
# Path: include/gh_api.py
GITHUB_API_URL = "https://api.github.com/repos/"
GITHUB_TOKEN = os.environ.get("GH_ACCESS_TOKEN", None)
def github_request_json(url):
def fetch_domains_from_code(repository):
def fetch_repository(github_url):
def fetch_contributors(repo_obj):
def fetch_issues_and_prs(repo_obj):
def fetch_contributor(contributor_obj):
def fetch_contributor_contributions(repo_obj, contributor_obj):
def json_request(url):
# Path: include/output.py
class Output:
ANSI_RESET = "\033[0m"
ANSI_BLUE = "\033[94m"
ANSI_GREEN = "\033[92m"
ANSI_RED = "\033[91m"
ANSI_YELLOW = "\033[93m"
def __init__(self, verbose=False, outfile=None, outformat='text'):
def initialize_repo_output(self, repository):
def positive(self, message, weight=0):
def negative(self, message, weight=0):
def debug(self, message):
def warn(self, message):
def _create_text_output(self):
def _create_json_output(self):
def _create_csv_output(self):
def doOutput(self):
# Path: include/arg_parser.py
def parse_repositories_from_file(filepath):
def validate_repository(repo):
def parse_arguments():
# Path: modules/verify_metadata.py
def run(repository, output_obj):
THRESHOLD = len(gh_api.fetch_contributors(repository))
# Path: modules/verify_contributors.py
def run(repository, output_obj):
# Path: modules/verify_domains.py
def run(repository, output_obj):
# Path: modules/verify_issues_prs.py
def run(repository, contributors, output_obj):
# Path: gitverify.py
import os, sys
from include import gh_api, output, arg_parser
from modules import verify_metadata
from modules import verify_contributors
from modules import verify_domains
from modules import verify_issues_prs
#!/usr/bin/env python3
if __name__ == "__main__":
args = arg_parser.parse_arguments()
output_obj = output.Output(verbose=args.verbose, outfile=args.outfile, outformat=args.format)
print("""
░██████╗░██╗████████╗██╗░░░██╗███████╗██████╗░██╗███████╗██╗░░░██╗
██╔════╝░██║╚══██╔══╝██║░░░██║██╔════╝██╔══██╗██║██╔════╝╚██╗░██╔╝
██║░░██╗░██║░░░██║░░░╚██╗░██╔╝█████╗░░██████╔╝██║█████╗░░░╚████╔╝░
██║░░╚██╗██║░░░██║░░░░╚████╔╝░██╔══╝░░██╔══██╗██║██╔══╝░░░░╚██╔╝░░
╚██████╔╝██║░░░██║░░░░░╚██╔╝░░███████╗██║░░██║██║██║░░░░░░░░██║░░░
░╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚═╝╚═╝╚═╝░░░░░░░░╚═╝░░░
GitVerify: Is the repo trustworthy? Make an informed decision.
v1.0 - https://www.kulkan.com
######################################################################################""")
# Let's warn the user that unauth RateLimits are pretty low
if os.environ.get("GH_ACCESS_TOKEN", None) == None:
output_obj.warn("GH_ACCESS_TOKEN environment variable not set, using GitHub RateLimits for anonymous queries")
output_obj.warn("Unauthenticated requests to the Github API will enforce a very low and strict RateLimit")
print("For information on how to create a GitHub API Access Token refer to: ")
print("https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens")
if os.environ.get("VT_API_KEY", None) == None:
output_obj.warn("VT_API_KEY environment variable not set, disabling VirusTotal checks.")
print("For information on how to create a VirusTotal API Key refer to: ")
print("https://www.virustotal.com/en/documentation/public-api/")
args.disable_vt = True
if not args.repositories_file:
args.repositories_file = [args.repository]
for repo in args.repositories_file:
try:
repository = gh_api.fetch_repository(repo)
print("######################################################################################")
print("Now verifying repository: {}".format(repository.get('full_name')))
except Exception as ex:
print("Unable to pull data for the repository that was provided. Is it a valid repo URL?")
if args.verbose:
print(ex)
sys.exit()
output_obj.initialize_repo_output(repository.get('full_name'))
verify_metadata.run(repository, output_obj)
# We store the result from contributors() to prevent calling it again for I+PRS
| contributors = verify_contributors.run(repository, output_obj) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nmathey/finasync
# Path: finasync/constants.py
GNOSIS_API_TOKENLIST_URI = (
"https://blockscout.com/xdai/mainnet/api?module=account&action=tokenlist&address="
)
# Path: finasync/constants.py
REALT_API_TOKENLIST_URI = "https://api.realt.community/v1/token"
# Path: finasync/constants.py
REALT_OFFLINE_TOKENS_LIST = "RealT_OfflineTokensList.json"
# Path: finasync/utils.py
def convert_currency(amount, from_currency, to_currency):
Now_Time = datetime.today()
Exchange_OfflineRates_Path = Path(
EXCHANGE_OFFLINE_RATES_PATH
+ "Exchange_OfflineRates_To_"
+ to_currency
+ ".json"
)
Exchange_OfflineRates_Path.touch(exist_ok=True)
converted_amount = 0
with open(Exchange_OfflineRates_Path) as json_file:
try:
Exchange_OfflineRates = json.load(json_file)
except JSONDecodeError:
Exchange_OfflineRates = {
"info": {
"last_sync": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))
},
"data": {},
}
# Fetch latest exchange rates only if local cache > 1 week
if float(Exchange_OfflineRates["info"]["last_sync"]) < datetime.timestamp(
Now_Time - timedelta(weeks=1)
):
response = requests.get(EXCHANGE_RATES_API_URI + to_currency)
Exchange_OfflineRates["info"]["last_sync"] = str(datetime.timestamp(Now_Time))
Exchange_OfflineRates["data"] = response.json()
data = Exchange_OfflineRates["data"]
if "rates" in data:
rates = data["rates"]
if from_currency in rates and to_currency in rates:
converted_amount = amount / rates[from_currency]
else:
raise ValueError("Invalid currency!")
else:
raise ValueError("Unable to fetch exchange rates!")
with open(Exchange_OfflineRates_Path, "w") as outfile:
json.dump(Exchange_OfflineRates, outfile, indent=4)
return round(converted_amount, 2)
# Path: finasync/realt.py
import requests
import re
import json
import time
import os
import logging
from pathlib import Path
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from finary_uapi.user_real_estates import (
get_user_real_estates,
delete_user_real_estates,
update_user_real_estates,
add_user_real_estates,
add_user_real_estates_with_currency,
)
from finary_uapi.user_me import get_display_currency_code
from .constants import (
GNOSIS_API_TOKENLIST_URI,
REALT_API_TOKENLIST_URI,
REALT_OFFLINE_TOKENS_LIST,
)
from .utils import convert_currency
def get_realt_token_details(realt_token_contractAdress):
Now_Time = datetime.today()
RealT_OfflineTokensList_Path = Path(REALT_OFFLINE_TOKENS_LIST)
RealT_OfflineTokensList_Path.touch(exist_ok=True)
with open(RealT_OfflineTokensList_Path) as json_file:
try:
RealT_OfflineTokensList = json.load(json_file)
except JSONDecodeError:
RealT_OfflineTokensList = {
"info": {
"last_sync": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))
},
"data": {},
}
# Update offlineTokensList from RealT API only if more than 1 week old
if float(RealT_OfflineTokensList["info"]["last_sync"]) < datetime.timestamp(
Now_Time - timedelta(weeks=1)
):
MyRealT_API_Header = {
"Accept": "*/*",
"X-AUTH-REALT-TOKEN": os.environ["MYREALT_API_KEY"],
}
TokensListReq = requests.get(
| REALT_API_TOKENLIST_URI, headers=MyRealT_API_Header |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: biggzlar/plausible-uncertainties
# Path: evidential_regression/layers.py
class DenseInverseGamma(torch.nn.Module):
""" Based on: https://github.com/aamini/evidential-deep-learning.
"""
def __init__(self, in_features, units=1):
super(DenseInverseGamma, self).__init__()
self.units = units
self.dense = torch.nn.Linear(in_features=in_features, out_features=4 * self.units)
self.softplus = torch.nn.Softplus()
def evidence(self, x):
return self.softplus(x)
def forward(self, x):
output = self.dense(x)
mu, logv, logalpha, logbeta = torch.split(output, split_size_or_sections=self.units, dim=-1)
nu = self.evidence(logv)
alpha = self.evidence(logalpha) + 2
beta = self.evidence(logbeta)
return mu, nu, alpha, beta
# Path: evidential_regression/layers.py
class DenseInverseWishart(torch.nn.Module):
def __init__(self, in_features, p=1, mu_activation=None):
super(DenseInverseWishart, self).__init__()
self.p = p
self.diag_indices = [i for i in range(self.p)]
self.tril_indices = torch.tril_indices(self.p, self.p).tolist()
self.mu = torch.nn.Linear(in_features=in_features, out_features=self.p)
self.params = torch.nn.Linear(in_features=in_features, out_features=2)
self.n_decomposit_units = int((1 + self.p) * self.p / 2)
self.L_decomposit = torch.nn.Linear(in_features=in_features, out_features=self.p**2)
self.softplus = torch.nn.Softplus()
self.mu_activation = mu_activation
def evidence(self, x):
return self.softplus(x)
def forward(self, x):
mu = self.mu(x)
params = self.params(x)
lognu, logkappa = torch.split(params, split_size_or_sections=1, dim=-1)
if self.mu_activation is not None:
mu = self.mu_activation(mu)
nu = self.evidence(lognu) + self.p + 1
kappa = self.evidence(logkappa) + 1
L = self.L_decomposit(x)
L = L.view(-1, self.p, self.p)
L = torch.tril(L, diagonal=-1) + torch.diag_embed(1e-2 + self.evidence(torch.diagonal(L, dim1=-2, dim2=-1)))
# non_zeros = self.L_decomposit(x)
# L = torch.zeros((x.shape[0], self.p, self.p))
# L[:, self.tril_indices[0], self.tril_indices[1]] = non_zeros
# L[:, self.diag_indices, self.diag_indices] = self.evidence(L[:, self.diag_indices, self.diag_indices])
return mu, nu, kappa, L
# Path: evidential_regression/networks.py
import torch
import torch.nn as nn
import numpy as np
from .layers import DenseInverseGamma, DenseInverseWishart
class UnivariateDerNet(nn.Module):
def __init__(self):
super(UnivariateDerNet, self).__init__()
self.hidden = nn.Sequential(
nn.Linear(in_features=1, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
| DenseInverseGamma(in_features=128, units=1) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: t-ega/whatsapp-cloud-sdk
# Path: whatsapp_cloud_sdk/_utils/types.py
class MessageTypes(Enum):
IMAGE = "image"
AUDIO = "audio"
TEXT = "text"
REACTION = "reaction"
STICKER = "sticker"
LOCATION = "location"
UNKNOWN = "unknown"
# Path: whatsapp_cloud_sdk/_validators/messages.py
class ButtonContents(BaseModel):
"""
Represents the contents of a button.
Args:
id (str, optional): An optional button ID. Defaults to a UUID.
title (str): The title or label of the button.
Attributes:
None
"""
id: Optional[str] = str(uuid.uuid4())
title: constr(max_length=20, min_length=1)
# Path: whatsapp_cloud_sdk/_formaters/message_formatter.py
from enum import Enum
from typing import List, Optional
from unicodedata import decimal
from whatsapp_cloud_sdk._utils.types import JSONDict
from whatsapp_cloud_sdk._validators.messages import ButtonContents
"""This module contains custom formatting class and aliases for internal use within the library.
Warning:
Contents of this module are intended to be used internally by the library and *not* by the
user. Changes to this module are not considered breaking changes and may not be documented in
the changelog.
"""
class LinkTypes(Enum):
"""
Constants representing different types of links.
Attributes:
AUDIO (str): A link type for audio content.
IMAGE (str): A link type for image content.
VIDEO (str): A link type for video content.
"""
AUDIO = "audio"
IMAGE = "image"
VIDEO = "video"
class MessageFormatter:
"""
Provides methods for formatting messages and data for interaction with the WhatsApp API.
Methods:
- format_text_message(body: str, to: str, preview_url: bool = False,
message_id: str = None) -> JSONDict:
- format_button_message(to: str, text: str, buttons: List[ButtonContents],
message_id: Optional[str])
-> JSONDict:
- format_reply_with_reaction(to: str, emoji, message_id: Optional[str]) -> JSONDict:
- format_link_message(to: str, link: str, m_type: LinkTypes, caption: str = "",
message_id: str =None
-> JSONDict:
- format_send_document_by_url(to: str, document_link: str, caption: str,
is_reply: bool = False,
message_id: str = None) -> JSONDict:
- format_location_message(to: str, latitude: decimal, longitude: int, name: str,
address: str,
message_id: Optional[str])
-> JSONDict:
- format_contact_message(contact: list, to: str, message_id: Optional[str]) -> JSONDict:
- format_sticker_message_by_url(link: str, to: str, message_id: Optional[str]) -> JSONDict:
- mark_message_as_read(message_id: str) -> JSONDict:
"""
@staticmethod
def format_text_message(
body: str, to: str, preview_url: bool = False, message_id: str = None
) -> JSONDict:
"""
Formats a text message for WhatsApp.
Args:
- body (str): The text message body.
- to (str): The recipient's WhatsApp number.
- preview_url (bool, optional): Whether to preview URLs in the message.
- message_id (str, optional): The ID of the message being replied to.
Returns:
- JSONDict: The formatted text message.
"""
body = {
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": to,
"type": "text",
"text": {"preview_url": preview_url, "body": body},
}
if message_id:
body["context"] = {"message_id": message_id}
return body
@staticmethod
def format_button_message(
to: str,
text: str,
| buttons: List[ButtonContents], |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DTennant/GPC
# Path: data/data_utils.py
def subsample_instances(dataset, prop_indices_to_subsample=0.8):
np.random.seed(0)
subsample_indices = np.random.choice(range(len(dataset)), replace=False,
size=(int(prop_indices_to_subsample * len(dataset)),))
return subsample_indices
# Path: config.py
_C = CN()
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
_C.MODEL.NAME = 'resnet50'
_C.MODEL.LAST_STRIDE = 1
_C.MODEL.LABEL_SMOOTH = False
_C.MODEL.PRETRAIN_PATH = ''
_C.INPUT = CN()
_C.INPUT.SIZE_TRAIN = [384, 128]
_C.INPUT.SIZE_TEST = [384, 128]
_C.INPUT.PROB = 0.0
_C.INPUT.RE_PROB = 0.0
_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]
_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]
_C.INPUT.PADDING = 10
_C.DATASETS = CN()
_C.DATASETS.NAMES = ('market1501')
_C.DATASETS.DATA_PATH = '/home/zbc/data/market1501/'
_C.DATASETS.TRAIN_PATH = 'bounding_box_train'
_C.DATASETS.QUERY_PATH = 'query'
_C.DATASETS.GALLERY_PATH = 'bounding_box_test'
_C.DATALOADER = CN()
_C.DATALOADER.NUM_WORKERS = 8
_C.DATALOADER.SAMPLER = 'softmax'
_C.DATALOADER.NUM_INSTANCE = 16
_C.SOLVER = CN()
_C.SOLVER.OPTIMIZER_NAME = "Adam"
_C.SOLVER.FP16 = False
_C.SOLVER.MAX_EPOCHS = 50
_C.SOLVER.BASE_LR = 3e-4
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.MARGIN = 0.3
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30, 55)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 50
_C.SOLVER.LOG_PERIOD = 100
_C.SOLVER.EVAL_PERIOD = 50
_C.SOLVER.IMS_PER_BATCH = 64
_C.SOLVER.CYTHON = True
_C.TEST = CN()
_C.TEST.IMS_PER_BATCH = 128
_C.TEST.WEIGHT = ""
_C.TEST.DEBUG = False
_C.TEST.MULTI_GPU = False
_C.TEST.RERANK = True
_C.OUTPUT_DIR = ""
# Path: data/imagenet.py
import torchvision
import numpy as np
import os
from copy import deepcopy
from data.data_utils import subsample_instances
from config import imagenet_root
class ImageNetBase(torchvision.datasets.ImageFolder):
def __init__(self, root, transform):
super(ImageNetBase, self).__init__(root, transform)
self.uq_idxs = np.array(range(len(self)))
def __getitem__(self, item):
img, label = super().__getitem__(item)
uq_idx = self.uq_idxs[item]
return img, label, uq_idx
def subsample_dataset(dataset, idxs):
imgs_ = []
for i in idxs:
imgs_.append(dataset.imgs[i])
dataset.imgs = imgs_
samples_ = []
for i in idxs:
samples_.append(dataset.samples[i])
dataset.samples = samples_
# dataset.imgs = [x for i, x in enumerate(dataset.imgs) if i in idxs]
# dataset.samples = [x for i, x in enumerate(dataset.samples) if i in idxs]
dataset.targets = np.array(dataset.targets)[idxs].tolist()
dataset.uq_idxs = dataset.uq_idxs[idxs]
return dataset
def subsample_classes(dataset, include_classes=list(range(1000))):
cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]
target_xform_dict = {}
for i, k in enumerate(include_classes):
target_xform_dict[k] = i
dataset = subsample_dataset(dataset, cls_idxs)
dataset.target_transform = lambda x: target_xform_dict[x]
return dataset
def get_train_val_indices(train_dataset, val_split=0.2):
train_classes = list(set(train_dataset.targets))
# Get train/test indices
train_idxs = []
val_idxs = []
for cls in train_classes:
cls_idxs = np.where(np.array(train_dataset.targets) == cls)[0]
v_ = np.random.choice(cls_idxs, replace=False, size=((int(val_split * len(cls_idxs))),))
t_ = [x for x in cls_idxs if x not in v_]
train_idxs.extend(t_)
val_idxs.extend(v_)
return train_idxs, val_idxs
def get_equal_len_datasets(dataset1, dataset2):
"""
Make two datasets the same length
"""
if len(dataset1) > len(dataset2):
rand_idxs = np.random.choice(range(len(dataset1)), size=(len(dataset2, )))
subsample_dataset(dataset1, rand_idxs)
elif len(dataset2) > len(dataset1):
rand_idxs = np.random.choice(range(len(dataset2)), size=(len(dataset1, )))
subsample_dataset(dataset2, rand_idxs)
return dataset1, dataset2
def get_imagenet_100_datasets(train_transform, test_transform, train_classes=range(80),
prop_train_labels=0.8, split_train_val=False, seed=0):
np.random.seed(seed)
# Subsample imagenet dataset initially to include 100 classes
subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)
subsampled_100_classes = np.sort(subsampled_100_classes)
print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')
cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}
# Init entire training set
| imagenet_training_set = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: camenduru/MiniGPT-v2-hf
# Path: minigpt4/common/dist_utils.py
def download_cached_file(url, check_hash=True, progress=False):
"""
Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.
If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.
"""
def get_cached_file_path():
# a hack to sync the file path across processes
parts = torch.hub.urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(timm_hub.get_cache_dir(), filename)
return cached_file
if is_main_process():
timm_hub.download_cached_file(url, check_hash, progress)
if is_dist_avail_and_initialized():
dist.barrier()
return get_cached_file_path()
# Path: minigpt4/common/dist_utils.py
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
# Path: minigpt4/common/utils.py
def get_abs_path(rel_path):
return os.path.join(registry.get_path("library_root"), rel_path)
# Path: minigpt4/common/utils.py
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
# Path: minigpt4/models/eva_vit.py
def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
img_size=img_size,
patch_size=14,
use_mean_pooling=False,
embed_dim=1408,
depth=39,
num_heads=1408//88,
mlp_ratio=4.3637,
qkv_bias=True,
drop_path_rate=drop_path_rate,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
use_checkpoint=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth"
cached_file = download_cached_file(
url, check_hash=False, progress=True
)
state_dict = torch.load(cached_file, map_location="cpu")
interpolate_pos_embed(model,state_dict)
incompatible_keys = model.load_state_dict(state_dict, strict=False)
# print(incompatible_keys)
if precision == "fp16":
# model.to("cuda")
convert_weights_to_fp16(model)
return model
# Path: minigpt4/models/base_model.py
import os
import logging
import contextlib
import numpy as np
import torch
import torch.nn as nn
from omegaconf import OmegaConf
from transformers import BertTokenizer, LlamaTokenizer
from transformers.models.llama.modeling_llama import LlamaForCausalLM
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_int8_training,
)
from minigpt4.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
from minigpt4.common.utils import get_abs_path, is_url
from minigpt4.models.eva_vit import create_eva_vit_g
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[-1].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
| return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type]) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: deepghs/sdeval
# Path: sdeval/utils/images.py
def _yield_images(images: ImagesTyping) -> Iterator[Image.Image]:
def load_images(images: ImagesTyping) -> List[Image.Image]:
# Path: sdeval/utils/tqdm_.py
def tqdm(*args, silent: bool = False, **kwargs):
"""
An enhanced version of tqdm (progress bar) with an option to silence the output.
This function modifies the behavior of tqdm to allow silencing the progress bar.
:param args: Positional arguments to be passed to tqdm.
:param silent: If True, the progress bar content will not be displayed.
:type silent: bool
:param kwargs: Additional keyword arguments to be passed to tqdm.
:return: tqdm progress bar.
:rtype: tqdm.std.tqdm
"""
with io.StringIO() as sio:
if silent:
kwargs['file'] = sio
return _origin_tqdm(*args, **kwargs)
# Path: sdeval/corrupt/aicorrupt.py
import json
import numpy as np
from functools import lru_cache
from typing import Tuple, Optional, Mapping
from PIL import Image
from huggingface_hub import hf_hub_download
from imgutils.data import rgb_encode, ImageTyping, load_image
from imgutils.utils import open_onnx_model
from ..utils import ImagesTyping, load_images, tqdm
@lru_cache()
def _open_anime_aicop_meta(model_name: str):
"""
Open the meta information of the AI image corrupted detection model.
This function downloads and opens the meta information of the AI image corrupted detection model specified by the given model name using Hugging Face Hub.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The opened meta information of the AI image corrupted detection model.
:rtype: dict
"""
with open(hf_hub_download(
f'deepghs/ai_image_corrupted',
f'{model_name}/meta.json',
), 'r', encoding='utf-8') as f:
return json.load(f)
@lru_cache()
def _open_anime_aicop_labels(model_name: str):
"""
Open the labels of the AI image corrupted detection model.
This function opens the labels of the AI image corrupted detection model specified by the given model name.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The labels of the AI image corrupted detection model.
:rtype: List[str]
"""
return _open_anime_aicop_meta(model_name)['labels']
def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384),
normalize: Optional[Tuple[float, float]] = (0.5, 0.5)):
"""
Encode the image for AI image corrupted detection.
This function resizes and encodes the image for AI image corrupted detection.
:param image: The input image.
:type image: Image.Image
:param size: The target size for encoding. Default is (384, 384).
:type size: Tuple[int, int]
:param normalize: The normalization parameters. Default is (0.5, 0.5).
:type normalize: Optional[Tuple[float, float]]
:return: The encoded image data.
:rtype: np.ndarray
"""
image = image.resize(size, Image.BILINEAR)
data = rgb_encode(image, order_='CHW')
if normalize is not None:
mean_, std_ = normalize
mean = np.asarray([mean_]).reshape((-1, 1, 1))
std = np.asarray([std_]).reshape((-1, 1, 1))
data = (data - mean) / std
return data.astype(np.float32)
def get_ai_corrupted(image: ImageTyping, model_name: str = _DEFAULT_MODEL_NAME) -> Mapping[str, float]:
"""
Get AI image corrupted detection scores for an image.
This function calculates AI image corrupted detection scores for a given image using the specified model.
:param image: The input image.
:type image: ImageTyping
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:return: A dictionary containing the corrupted score.
:rtype: Mapping[str, float]
"""
image = load_image(image, force_background='white', mode='RGB')
input_ = _img_encode(image)[None, ...]
output, = _open_anime_aicop_model(model_name).run(['output'], {'input': input_})
return dict(zip(_open_anime_aicop_labels(model_name), output[0].tolist()))
class AICorruptMetrics:
"""
Class for calculating an AI image corruptness score.
The `AICorruptMetrics` class allows you to calculate an AI image corruptness score using the AI image corrupted detection model.
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:param tqdm_desc: Description for the tqdm progress bar during calculation.
:type tqdm_desc: str
"""
def __init__(self, model_name: str = _DEFAULT_MODEL_NAME,
silent: bool = False, tqdm_desc: str = None):
self._model_name = model_name
self.silent = silent
self.tqdm_desc = tqdm_desc or self.__class__.__name__
def score(self, images: ImagesTyping, silent: bool = None):
"""
Calculate the AI image corruptness score for a set of images.
This method calculates the AI image corruptness score for a set of input images using the AI image corrupted detection model.
:param images: The set of input images for calculating the AI image corruptness score.
:type images: ImagesTyping
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:return: The AI image corruptness score.
:rtype: float
"""
| image_list = load_images(images) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: WHUlwb/Assisted_learning
# Path: hrnet/backbone.py
BN_MOMENTUM = 0.1
# Path: hrnet/backbone.py
def hrnet_classification(backbone='hrnetv2_w18'):
model = HighResolutionNet_Classification(num_classes=1000, backbone=backbone)
return model
# Path: hrnet/hrnet.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .backbone import BN_MOMENTUM, hrnet_classification
class HRnet_Backbone(nn.Module):
def __init__(self, in_channel, backbone = 'hrnetv2_w18'):
super(HRnet_Backbone, self).__init__()
self.model = hrnet_classification(backbone = backbone)
del self.model.incre_modules
del self.model.downsamp_modules
del self.model.final_layer
del self.model.classifier
self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=2, padding=1, bias=False)
def forward(self, x):
# x = self.model.conv1(x) # 原来的
x = self.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.conv2(x)
x = self.model.bn2(x)
x = self.model.relu(x)
x = self.model.layer1(x)
x_list = []
for i in range(2):
if self.model.transition1[i] is not None:
x_list.append(self.model.transition1[i](x))
else:
x_list.append(x)
y_list = self.model.stage2(x_list)
x_list = []
for i in range(3):
if self.model.transition2[i] is not None:
if i < 2:
x_list.append(self.model.transition2[i](y_list[i]))
else:
x_list.append(self.model.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.model.stage3(x_list)
x_list = []
for i in range(4):
if self.model.transition3[i] is not None:
if i < 3:
x_list.append(self.model.transition3[i](y_list[i]))
else:
x_list.append(self.model.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.model.stage4(x_list)
return y_list
class HRnet(nn.Module):
def __init__(self, in_channel, num_classes = 21, backbone = 'hrnetv2_w18', pretrained = True):
super(HRnet, self).__init__()
self.backbone = HRnet_Backbone(in_channel, backbone = backbone, pretrained = pretrained)
last_inp_channels = np.int(np.sum(self.backbone.model.pre_stage_channels))
self.last_layer = nn.Sequential(
nn.Conv2d(in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0),
| nn.BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dagedarr/telegram-budget
# Path: core/crud.py
async def get_by_id(
model: ModelType,
obj_id: int,
session: AsyncSession
) -> ModelType:
"""
Получение объекта по ID.
Parameters:
- model (ModelType): Тип модели SQLAlchemy.
- obj_id (int): Идентификатор объекта.
- session (AsyncSession): Асинхронная сессия для взаимодействия с БД.
Returns:
ModelType: Объект модели, найденный по ID.
"""
get_obj_in_db = await session.execute(
select(model).where(model.id == obj_id)
)
return get_obj_in_db.scalars().first()
# Path: core/crud.py
async def update(
db_obj: ModelType,
obj_in: dict,
session: AsyncSession,
) -> ModelType:
"""
Изменение значений полей объекта.
Parameters:
- db_obj (ModelType): Объект модели для обновления.
- obj_in (dict): Словарь с новыми значениями полей.
- session (AsyncSession): Асинхронная сессия для взаимодействия с БД.
Returns:
ModelType: Обновленный объект модели.
"""
for field in obj_in:
setattr(db_obj, field, obj_in[field])
session.add(db_obj)
await session.commit()
await session.refresh(db_obj)
return db_obj
# Path: filters/user_filters.py
class IsEndOnboardingFilter(BaseFilter):
"""
Фильтр для проверки прохождения онбординга.
"""
async def __call__(self, message: Message) -> bool:
session = await get_async_session()
user = await get_by_id(
model=User,
obj_id=message.from_user.id,
session=session,
)
if user:
await session.close()
return user.is_onboarding
await session.close()
return False
# Path: forms/user_form.py
class RegistrationForm(StatesGroup):
username = State()
mail = State()
# Path: keyboards/user_keyboards.py
def set_info_keyboard(is_onboarding=False) -> InlineKeyboardMarkup:
"""Клавиатура изменения данных пользователя."""
builder = InlineKeyboardBuilder()
builder.add(InlineKeyboardButton(
text='Ввести почту' if is_onboarding else 'Поменять почту',
callback_data='get_mail')
)
builder.add(InlineKeyboardButton(
text='Поменять имя',
callback_data='get_username')
)
if is_onboarding:
builder.add(InlineKeyboardButton(
text='Завершить регистрацию',
callback_data='registration_end')
)
else:
builder.add(InlineKeyboardButton(
text='Назад',
callback_data='other')
)
builder.adjust(2)
return builder.as_markup()
# Path: keyboards/user_keyboards.py
def universal_keyboard(
buttons: List[Tuple[str, Union[str, CallbackData]]],
buttons_per_row: int = 1,
) -> InlineKeyboardMarkup:
"""Универсальная клавиатура с кнопками колбека."""
builder = InlineKeyboardBuilder()
if len(buttons) == 1:
text, data = buttons[0]
builder.add(InlineKeyboardButton(text=text, callback_data=data))
else:
line = []
for text, data in buttons:
line.append(
InlineKeyboardButton(text=text, callback_data=data)
)
builder.add(*line)
builder.adjust(buttons_per_row)
return builder.as_markup()
# Path: models/user.py
class User(Base):
"""Модель пользователя."""
username = Column(String(64), nullable=True)
email = Column(String(254), unique=True, index=True, nullable=True)
registration_time = Column(BigInteger) # Время в формате Unix.
is_onboarding = Column(Boolean, default=False)
categories = relationship(
'Category', back_populates='user',
cascade='all, delete-orphan', lazy='selectin'
)
aliases = relationship(
'Alias', back_populates='user', cascade='all, delete-orphan'
)
transactions = relationship(
'Transaction', back_populates='user',
cascade='all, delete-orphan', lazy='selectin'
)
# Path: utils/user_actions.py
async def callback_message(
target: Union[Message, CallbackQuery],
text: str,
reply_markup: InlineKeyboardMarkup = None,
replace_message: bool = False,
delete_reply: bool = True,
**kwargs,
):
"""Редактировние сообщения."""
target = target if isinstance(target, Message) else target.message
if replace_message:
await target.edit_text(
text=text,
reply_markup=reply_markup,
**kwargs
)
else:
await target.answer(
text=text,
reply_markup=reply_markup,
**kwargs
)
await target.delete_reply_markup() if delete_reply else None
# Path: handlers/change_info_handler.py
from aiogram import F, Router
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, Message
from sqlalchemy.ext.asyncio import AsyncSession
from core.crud import get_by_id, update
from filters import IsEndOnboardingFilter
from forms import RegistrationForm
from keyboards import set_info_keyboard, universal_keyboard
from models import User
from utils.user_actions import callback_message
router = Router(name='change_info_router')
@router.callback_query(F.data == 'change_info')
async def change_info(callback: CallbackQuery):
"""Выводит Категории и Статистику и осльной функционал."""
await callback_message(
target=callback,
text='Изменить данные о себе',
| reply_markup=set_info_keyboard(), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nchen909/Pass-Tuning
# Path: evaluator/CodeBLEU/parser/utils.py
def remove_comments_and_docstrings(source, lang):
if lang in ['python']:
"""
Returns 'source' minus comments and docstrings.
"""
io_obj = StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
ltext = tok[4]
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
if start_col > 0:
out += token_string
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
temp = []
for x in out.split('\n'):
if x.strip() != "":
temp.append(x)
return '\n'.join(temp)
elif lang in ['ruby']:
return source
else:
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
temp = []
for x in re.sub(pattern, replacer, source).split('\n'):
if x.strip() != "":
temp.append(x)
return '\n'.join(temp)
# Path: evaluator/CodeBLEU/parser/utils.py
def tree_to_token_index(root_node):
if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',
'character_literal']) and root_node.type != 'comment':
return [(root_node.start_point, root_node.end_point)]
else:
code_tokens = []
for child in root_node.children:
code_tokens += tree_to_token_index(child)
return code_tokens
# Path: evaluator/CodeBLEU/parser/utils.py
def index_to_code_token(index, code):
start_point = index[0]
end_point = index[1]
if start_point[0] == end_point[0]:
s = code[start_point[0]][start_point[1]:end_point[1]]
else:
s = ""
s += code[start_point[0]][start_point[1]:]
for i in range(start_point[0] + 1, end_point[0]):
s += code[i]
s += code[end_point[0]][:end_point[1]]
return s
# Path: evaluator/CodeBLEU/parser/utils.py
def tree_to_variable_index(root_node, index_to_code):
if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',
'character_literal']) and root_node.type != 'comment':
index = (root_node.start_point, root_node.end_point)
_, code = index_to_code[index]
if root_node.type != code:
return [(root_node.start_point, root_node.end_point)]
else:
return []
else:
code_tokens = []
for child in root_node.children:
code_tokens += tree_to_variable_index(child, index_to_code)
return code_tokens
# Path: evaluator/CodeBLEU/parser/DFG.py
from tree_sitter import Language, Parser
from .utils import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index)
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
def DFG_python(root_node,index_to_code,states):
assignment=['assignment','augmented_assignment','for_in_clause']
if_statement=['if_statement']
for_statement=['for_statement']
while_statement=['while_statement']
do_first_statement=['for_in_clause']
def_statement=['default_parameter']
states=states.copy()
if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None:
| indexs=tree_to_variable_index(name,index_to_code) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kavisha725/MBNSF
# Path: utils/o3d_uitls.py
def extract_clusters_dbscan(cloud, eps = 0.9, min_points=10, return_clusters= False, return_colored_pcd=False):
pcl = copy.deepcopy(cloud)
pcl = make_open3d_point_cloud(pcl)
labels = np.array(
pcl.cluster_dbscan(eps=eps, min_points=min_points, print_progress=True))
if return_colored_pcd:
cmap = plt.get_cmap("tab20")
max_label = labels.max()
print("Has %d clusters" % (max_label + 1))
colors = cmap(labels / (max_label if max_label > 0 else 1))
colors[labels < 0] = 0
pcl.colors = o3d.utility.Vector3dVector(colors[:, :3])
# o3d.visualization.draw_geometries([pcl])
# save_view_point(pcl, 'pcd_viewpoint.json')
load_view_point(pcl, 'pcd_viewpoint.json')
clusters = []
if return_clusters:
label_ids = np.delete(np.unique(labels), 0)
for id in label_ids:
clusters.append(cloud[labels == id])
clusters = np.asarray(clusters)
if return_colored_pcd:
return labels, clusters, pcl
return labels, clusters
else:
if return_colored_pcd:
return labels, pcl
return labels
# Path: utils/sc_utils.py
def spatial_consistency_loss(src_keypts, tgt_keypts, d_thre=0.1, max_points = 3000):
"""
Input:
- src_keypts: [bs, num_corr, 3]
- tgt_keypts: [bs, num_corr, 3]
Output:
- sc_loss: [bs, 1], the spatial consistency loss.
"""
bs, num_corr = src_keypts.shape[0], tgt_keypts.shape[1]
# (Optional) random sample points
if num_corr > max_points:
rand_perm = torch.randperm(num_corr)
rand_idx = rand_perm[:max_points]
src_keypts = src_keypts[:, rand_idx, :]
tgt_keypts = tgt_keypts[:, rand_idx, :]
# Spatial Consistency Adjacency Matrix
src_dist = torch.norm((src_keypts[:, :, None, :] - src_keypts[:, None, :, :]), dim=-1)
target_dist = torch.norm((tgt_keypts[:, :, None, :] - tgt_keypts[:, None, :, :]), dim=-1)
cross_dist = torch.abs(src_dist - target_dist)
adj_mat = torch.clamp(1.0 - cross_dist ** 2 / d_thre ** 2, min=0)
# Spatial Consistency Loss
lead_eigvec = power_iteration(adj_mat)
sc_score = spatial_consistency_score( adj_mat, lead_eigvec)
sc_loss = -torch.log(sc_score)
return sc_loss
# Path: trajectory_estimation/mbnt.py
import os, glob
import argparse
import logging
import csv
import numpy as np
import torch
import sys
import pytorch3d.loss as p3dloss
from utils.general_utils import *
from utils.ntp_utils import *
from utils.o3d_uitls import extract_clusters_dbscan
from utils.sc_utils import spatial_consistency_loss
# Long-term trajectory estimation with MBNT.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
logger = logging.getLogger(__name__)
def total_sc_loss(labels_t, label_ids, pc, pc_defored, d_thresh=0.03, max_points=3000):
loss_sc = None
for id in label_ids:
cluster = pc[labels_t == id]
cluster_deformed = pc_defored[labels_t == id]
assert cluster.shape == cluster_deformed.shape
cluster_cs_loss = spatial_consistency_loss(cluster.unsqueeze(0), cluster_deformed.unsqueeze(0), d_thre=d_thresh, max_points=max_points)
if not loss_sc:
loss_sc = cluster_cs_loss
else: loss_sc += cluster_cs_loss
loss_sc /= len(label_ids)
return loss_sc.squeeze()
def fit_trajectory_field(
exp_dir,
pc_list,
options,
flow_gt_list = None,
traj_gt = None,
traj_val_mask = None
):
csv_file = open(f"{exp_dir}/metrics.csv", 'w')
metric_labels = ['train_loss', 'train_chamfer_loss', 'train_sc_loss', 'train_consist_loss', 'traj_consist', 'epe', 'acc_strict', 'acc_relax', 'angle_error', 'outlier']
csv_writer = csv.DictWriter(csv_file, ['itr'] + metric_labels + ['traj_metric'])
csv_writer.writeheader()
n_lidar_sweeps = len(pc_list)
if traj_gt is not None and traj_val_mask is not None:
traj_gt = torch.from_numpy(traj_gt).cuda()
traj_val_mask = torch.from_numpy(traj_val_mask).cuda()
# ANCHOR: Initialize the trajectory field
net = NeuralTrajField(traj_len=n_lidar_sweeps,
filter_size=options.hidden_units,
act_fn=options.act_fn, traj_type=options.traj_type, st_embed_type=options.st_embed_type)
net.to(options.device)
optimizer = torch.optim.Adam(net.parameters(), lr=options.lr, weight_decay=options.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.5)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200,400,600,800], gamma=0.5)
# Pre-compute clusters:
labels_database, label_ids_database = [], []
for fid in range(n_lidar_sweeps):
| labels = extract_clusters_dbscan(pc_list[fid], eps = options.sc_cluster_eps, min_points=options.sc_cluster_min_points, return_clusters= False, return_colored_pcd=False) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: cool-dev-guy/tkmoderngl
# Path: tkmoderngl/framebuffer.py
class FramebufferImage(ImageTk.PhotoImage):
def __init__(self, master, ctx, size):
super(FramebufferImage, self).__init__(Image.new('RGB', size, (0, 0, 0)))
self.ctx = ctx
self.fbo = self.ctx.simple_framebuffer(size)
self.scope = self.ctx.scope(self.fbo)
def __enter__(self):
self.scope.__enter__()
def __exit__(self, *args):
self.scope.__exit__(*args)
self.paste(Image.frombytes('RGB', self.fbo.size, self.fbo.read(), 'raw', 'RGB', 0, -1))
# Path: tkmoderngl/renderer.py
class Canvas:
def __init__(self, ctx, reserve='4MB'):
self.ctx = ctx
self.prog = self.ctx.program(
vertex_shader='''
#version 330
uniform vec2 Pan;
in vec2 in_vert;
in vec4 in_color;
out vec4 v_color;
void main() {
v_color = in_color;
gl_Position = vec4(in_vert - Pan, 0.0, 1.0);
}
''',
fragment_shader='''
#version 330
in vec4 v_color;
out vec4 f_color;
void main() {
f_color = v_color;
}
''',
)
self.vbo = ctx.buffer(reserve='4MB', dynamic=True)
self.vao = ctx.simple_vertex_array(self.prog, self.vbo, 'in_vert', 'in_color')
def pan(self, pos):
self.prog['Pan'].value = pos
def clear(self, color=(0, 0, 0, 0)):
self.ctx.clear(*color)
def plot(self, points, type='line'):
data = points.astype('f4').tobytes()
self.vbo.orphan()
self.vbo.write(data)
if type == 'line':
self.ctx.line_width = 1.0
self.vao.render(moderngl.LINE_STRIP, vertices=len(data) // 24)
if type == 'points':
self.ctx.point_size = 3.0
self.vao.render(moderngl.POINTS, vertices=len(data) // 24)
# Path: tkmoderngl/renderer.py
class PanTool:
def __init__(self):
self.total_x = 0.0
self.total_y = 0.0
self.start_x = 0.0
self.start_y = 0.0
self.delta_x = 0.0
self.delta_y = 0.0
self.drag = False
def start_drag(self, x, y):
self.start_x = x
self.start_y = y
self.drag = True
def dragging(self, x, y):
if self.drag:
self.delta_x = (x - self.start_x) * 2.0
self.delta_y = (y - self.start_y) * 2.0
def stop_drag(self, x, y):
if self.drag:
self.dragging(x, y)
self.total_x -= self.delta_x
self.total_y += self.delta_y
self.delta_x = 0.0
self.delta_y = 0.0
self.drag = False
@property
def value(self):
return (self.total_x - self.delta_x, self.total_y + self.delta_y)
# Path: main.py
import tkinter as tk
import moderngl
import numpy as np
from tkmoderngl.framebuffer import FramebufferImage
from tkmoderngl.renderer import Canvas, PanTool
"""
code from moderngl/examples
modified by : cool-dev-guy
"""
# the moderngl widget
class GlWidget(tk.Label):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.parent = args[0]
self._ctx = moderngl.create_standalone_context()
self._tkfbo = FramebufferImage(args[0],self._ctx,(500,500))
| self._canvas = Canvas(self._ctx) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: G3VV/Yank
# Path: util/spotify.py
def start_token_thread():
client_id = spotify_id
client_secret = spotify_secret
get_access_token(client_id, client_secret)
# Path: util/download.py
async def start(id):
isrc = id
try:
try:
track = await spotify_isrc(isrc)
except Exception as e:
print("Spotify token expired or couldn't find isrc")
print(" ")
print(e)
return "none"
if 'isrc' in track['external_ids']:
isrc = track['external_ids']['isrc']
else:
isrc = "ISRC not available"
print("Song not found")
return "none"
j = await get_deezer_track(isrc)
pathfile = Path(f"./music/{isrc}.mp3")
if pathfile.is_file():
print(f"[{isrc}] Already cached")
return pathfile
else:
print(f"[{isrc}] Not cached")
try:
track_id = j["id"]
except:
print("Couldn't find song on deezer")
return "none"
loop = asyncio.get_event_loop()
download_track(track_id, isrc)
return pathfile
except Exception as e:
print(f"{e} at line {sys.exc_info()[-1].tb_lineno}")
return "none"
# Path: util/download.py
async def start_playlist(id):
folder_to_zip = f'./music/{id}/'
output_zip_file = f'./zip/{id}'
def zip_folder(folder_path, output_path):
print(f"[playlist] Zipping folder {folder_path} to {output_path}")
shutil.make_archive(output_path, 'zip', folder_path)
print(f"[playlist] Finished zipping folder {folder_path} to {output_path}")
isrc = id
try:
if os.path.exists(folder_to_zip):
return output_zip_file + ".zip"
try:
playlist_isrcs = await spotify_playlist(isrc)
except Exception as e:
print("Spotify token expired or couldn't find isrc")
print(" ")
print(e)
return "none"
deezer_ids = []
for index in range(len(playlist_isrcs)):
try:
j = await get_deezer_track(playlist_isrcs[index])
print(j["id"])
deezer_ids.append(f'{j["id"]}')
except:
print("Couldn't find song on deezer")
continue
#return deezer_ids
download_playlist(deezer_ids, id)
zip_folder(folder_to_zip, output_zip_file)
return output_zip_file + ".zip"
except Exception as e:
print(f"{e} at line {sys.exc_info()[-1].tb_lineno}")
return "none"
# Path: index.py
from quart import Quart, send_file
from util.spotify import start_token_thread
from util.download import start, start_playlist
from dotenv import load_dotenv
import threading
import re
import os
import json
app = Quart(__name__)
load_dotenv()
port = os.environ.get("port")
@app.route('/track/<string:id>')
async def serve_audio(id):
filename = await start(id)
return await send_file(filename, mimetype='audio/mpeg')
@app.route('/')
async def serve_index():
return "online"
@app.route('/playlist/<string:id>')
async def serve_playlist(id):
| filename = await start_playlist(id) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: openfoodfacts/open-prices
# Path: app/db.py
# Path: app/enums.py
CURRENCIES = [(currency, currency) for currency in list_currencies()]
NODE = "NODE"
WAY = "WAY"
RELATION = "RELATION"
PRICE_TAG = "PRICE_TAG"
RECEIPT = "RECEIPT"
GDPR_REQUEST = "GDPR_REQUEST"
UNIT = "UNIT"
KILOGRAM = "KILOGRAM"
class LocationOSMEnum(Enum):
class ProofTypeEnum(Enum):
class PricePerEnum(Enum):
# Path: app/models.py
from openfoodfacts import Flavor
from sqlalchemy import (
JSON,
BigInteger,
Boolean,
Column,
Date,
DateTime,
ForeignKey,
Integer,
Numeric,
String,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func
from sqlalchemy_utils import force_auto_coercion
from sqlalchemy_utils.types.choice import ChoiceType
from app.db import Base
from app.enums import CurrencyEnum, LocationOSMEnum, PricePerEnum, ProofTypeEnum
force_auto_coercion()
JSONVariant = JSON().with_variant(JSONB(), "postgresql")
class User(Base):
user_id = Column(String, primary_key=True, index=True)
token = Column(String, unique=True, index=True)
last_used = Column(DateTime(timezone=True))
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
__tablename__ = "users"
class Product(Base):
id = Column(Integer, primary_key=True, index=True)
code = Column(String, unique=True, index=True)
source = Column(ChoiceType(Flavor))
product_name = Column(String)
product_quantity = Column(Integer)
brands = Column(String)
image_url = Column(String)
unique_scans_n = Column(Integer, nullable=False, server_default="0")
prices: Mapped[list["Price"]] = relationship(back_populates="product")
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
updated = Column(DateTime(timezone=True), onupdate=func.now())
__tablename__ = "products"
class Location(Base):
id = Column(Integer, primary_key=True, index=True)
osm_id = Column(BigInteger)
osm_type = Column(ChoiceType(LocationOSMEnum))
osm_name = Column(String)
osm_display_name = Column(String)
osm_address_postcode = Column(String)
osm_address_city = Column(String)
osm_address_country = Column(String)
osm_lat = Column(Numeric(precision=11, scale=7))
osm_lon = Column(Numeric(precision=11, scale=7))
prices: Mapped[list["Price"]] = relationship(back_populates="location")
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
updated = Column(DateTime(timezone=True), onupdate=func.now())
__tablename__ = "locations"
class Proof(Base):
id = Column(Integer, primary_key=True, index=True)
file_path = Column(String, nullable=False)
mimetype = Column(String, index=True)
| type = Column(ChoiceType(ProofTypeEnum)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: krasnoukhov/homeassistant-smart-maic
# Path: custom_components/smart_maic/const.py
DEVICE_NAME = "device_name"
# Path: custom_components/smart_maic/const.py
DEVICE_ID = "devid"
# Path: custom_components/smart_maic/const.py
DEVICE_TYPE = "devtype"
# Path: custom_components/smart_maic/const.py
DOMAIN = "smart_maic"
# Path: custom_components/smart_maic/const.py
IP_ADDRESS = CONF_IP_ADDRESS
# Path: custom_components/smart_maic/const.py
PIN = CONF_PIN
# Path: custom_components/smart_maic/smart_maic.py
class SmartMaic:
"""Smart MAIC instance."""
def __init__(self, data: dict[str, Any]) -> None:
"""Init Smart MAIC."""
self._ip_address = data[IP_ADDRESS]
self._pin = data[PIN]
self._devid = data.get(DEVICE_ID)
def get_wdata(self) -> dict[str, Any]:
"""Get "wdata" for Smart MAIC component."""
self._login_request()
return self._get_request(page="getwdata").json()
def get_config(self) -> dict[str, Any]:
"""Get config for Smart MAIC component."""
self._login_request()
return self._get_request(page="webinit").json()
def set_mqtt_config(self) -> dict[str, Any]:
"""Set Smart MAIC MQTT config."""
config = self.get_config()
self._get_request(
page="mqtt",
serv=config["serv"],
port=config["port"],
uname=config["uname"],
**{"pass": config["pass"]},
mqtt_on=1,
mqttint=5,
separat=2,
prefix=f"{PREFIX}/",
)
return self.get_config()
def set_consumption(self, key: str, value: float) -> None:
"""Set Smart MAIC consumption value."""
self._login_request()
self._get_request(page="initval", **{key: value})
def set_dry_switch(self, value: int) -> dict[str, Any]:
"""Set Smart MAIC dry switch."""
self._get_request(
page="getdata", devid=self._devid, devpass=self._pin, pout=value
)
def _login_request(self) -> None:
self._get_request(page="devlogin", devpass=self._pin)
def _get_request(self, **kwargs) -> requests.Response:
"""Make GET request to the Smart MAIC API."""
url = urlparse(f"http://{self._ip_address}/")
url = url._replace(query=urlencode(kwargs))
_LOGGER.debug(f"Smart MAIC request: GET {url.geturl()}")
try:
r = requests.get(url.geturl(), timeout=HTTP_TIMEOUT)
r.raise_for_status()
_LOGGER.debug(f"Smart MAIC status: {r.status_code}")
_LOGGER.debug(f"Smart MAIC response: {r.text}")
return r
except TimeoutError as timeout_error:
raise ConnectionError from timeout_error
except requests.exceptions.ConnectionError as connection_error:
raise ConnectionError from connection_error
except requests.exceptions.HTTPError as http_error:
if http_error.response.status_code == 400:
return r
raise ConnectionError from http_error
# Path: custom_components/smart_maic/coordinator.py
class SmartMaicCoordinator(DataUpdateCoordinator[dict[str, Any]]):
"""Smart MAIC Coordinator class."""
def __init__(self, smart_maic: SmartMaic, hass: HomeAssistant) -> None:
"""Initialize."""
self._smart_maic = smart_maic
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
)
def _get_config(self) -> None:
"""Get Smart MAIC config."""
return self._smart_maic.set_mqtt_config()
async def async_get_config(self) -> None:
"""Get Smart MAIC config."""
return await self.hass.async_add_executor_job(self._get_config)
def _set_mqtt_config(self) -> None:
"""Set Smart MAIC MQTT config."""
return self._smart_maic.set_mqtt_config()
async def async_set_mqtt_config(self) -> None:
"""Set Smart MAIC MQTT config."""
return await self.hass.async_add_executor_job(self._set_mqtt_config)
def _set_consumption(self, key: str, value: float) -> None:
"""Set Smart MAIC consumption value."""
return self._smart_maic.set_consumption(key=key, value=value)
async def async_set_consumption(self, key: str, value: float) -> None:
"""Set Smart MAIC consumption value."""
return await self.hass.async_add_executor_job(self._set_consumption, key, value)
def _set_dry_switch(self, value: int) -> None:
"""Set Smart MAIC dry switch value."""
return self._smart_maic.set_dry_switch(value=value)
async def async_set_dry_switch(self, value: int) -> None:
"""Set Smart MAIC dry switch value."""
return await self.hass.async_add_executor_job(self._set_dry_switch, value)
# Path: custom_components/smart_maic/config_flow.py
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from typing import Any
from homeassistant import config_entries
from homeassistant.components import mqtt
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import AbortFlow
from .const import (
DEVICE_NAME,
DEVICE_ID,
DEVICE_TYPE,
DOMAIN,
IP_ADDRESS,
PIN,
)
from .smart_maic import SmartMaic
from .coordinator import SmartMaicCoordinator
"""Config flow for Smart MAIC integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
USER_SCHEMA = vol.Schema(
{
vol.Required(IP_ADDRESS): cv.string,
vol.Required(PIN): cv.string,
vol.Required(DEVICE_NAME, default="Energy"): cv.string,
}
)
async def validate_input(hass: HomeAssistant, data: dict) -> dict[str, Any]:
"""Validate the user input allows us to connect.
Data has the keys from USER_SCHEMA with values provided by the user.
"""
if not await mqtt.async_wait_for_mqtt_client(hass):
raise AbortFlow("mqtt_unavailable")
smart_maic = SmartMaic(data)
| coordinator = SmartMaicCoordinator(smart_maic, hass) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JoaoPedro9674/django-ledger
# Path: django_ledger/contrib/django_ledger_graphene/coa/schema.py
class ChartOfAccountsModelType(DjangoObjectType):
class Meta:
model = ChartOfAccountModel
fields = [
'uuid',
'slug',
'name',
'locked'
]
interfaces = (relay.Node,)
# Path: django_ledger/contrib/django_ledger_graphene/entity/schema.py
class EntityModelQuery(graphene.ObjectType):
entity_model_list_all = graphene.List(EntityModelType)
entity_model_list_visible = graphene.List(EntityModelType)
entity_model_list_hidden = graphene.List(EntityModelType)
entity_model_list_managed = graphene.List(EntityModelType)
entity_model_list_is_admin = graphene.List(EntityModelType)
entity_model_detail_by_uuid = graphene.Field(EntityModelTypeDetail, uuid=graphene.String(required=True))
entity_model_detail_by_slug = graphene.Field(EntityModelTypeDetail, slug=graphene.String(required=True))
@staticmethod
def get_base_queryset(info):
if info.context.resource_owner.is_authenticated:
return EntityModel.objects.for_user(user_model=info.context.resource_owner)
return EntityModel.objects.none()
# list ....
def resolve_entity_model_list_all(self, info, **kwargs):
return EntityModelQuery.get_base_queryset(info)
def resolve_entity_model_list_visible(self, info, **kwargs):
qs = EntityModelQuery.get_base_queryset(info)
return qs.visible()
def resolve_entity_model_list_hidden(self, info, **kwargs):
qs = EntityModelQuery.get_base_queryset(info)
return qs.hidden()
def resolve_entity_model_list_managed(self, info, **kwargs):
qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)
user_model = info.context.resource_owner
return qs.filter(managers__in=[user_model])
def resolve_entity_model_list_is_admin(self, info, **kwargs):
qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)
user_model = info.context.resource_owner
return qs.filter(admin=user_model)
# detail...
def resolve_entity_model_detail_by_slug(self, info, slug, **kwargs):
qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)
return qs.select_related('default_coa').get(slug__exact=slug)
def resolve_entity_model_detail_by_uuid(self, info, uuid, **kwargs):
qs: EntityModelQuerySet = EntityModelQuery.get_base_queryset(info)
return qs.select_related('default_coa').get(uuid__exact=uuid)
# Path: django_ledger/contrib/django_ledger_graphene/entity/schema.py
class EntityModelType(DjangoObjectType):
is_admin = graphene.Boolean()
def resolve_is_admin(self, info):
entity_model: EntityModel = self
return entity_model.is_admin_user(user_model=info.context.resource_owner)
class Meta:
model = EntityModel
fields = ENTITY_MODEL_BASE_FIELDS
filter_fields = {
'name': [
'exact',
'icontains',
'istartswith'
],
}
interfaces = (relay.Node,)
# Path: django_ledger/contrib/django_ledger_graphene/api.py
import graphene
from django_ledger.contrib.django_ledger_graphene.coa.schema import ChartOfAccountsModelType
from django_ledger.contrib.django_ledger_graphene.entity.schema import EntityModelQuery, EntityModelType
class Query(
EntityModelQuery,
# ChartOfAccountsModelQuery
# CustomerQuery,
# Bill_list_Query,
# Accountlist_Query,
# Bank_account_Query ,
# ChartOfAccountsQuery,
# UnitOfMeasureQuery,
# VendorsQuery,
# EntityUnitQuery,
# LedgerQuery,
# TransactionsQuery,
# JournalEntryQuery,
# PurchaseOrderQuery,
# QueryUser,
):
pass
# class Mutation(
# # CustomerMutations,
# # BankAccountMutations,
# # AuthMutation,
# ):
# pass
schema = graphene.Schema(
types=[
EntityModelType,
| ChartOfAccountsModelType |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: HLTCHKUST/InstructAlign
# Path: nlu_prompt.py
def get_prompt(prompt_lang):
if prompt_lang == 'EN':
return DATA_TO_EN_PROMPT
elif prompt_lang == 'EN2':
return DATA_TO_EN2_PROMPT
elif prompt_lang == 'EN3':
return DATA_TO_EN3_PROMPT
elif prompt_lang == 'ID':
return DATA_TO_ID_PROMPT
elif prompt_lang == 'ID2':
return DATA_TO_ID2_PROMPT
elif prompt_lang == 'ID3':
return DATA_TO_ID3_PROMPT
else:
raise ValueError(f'get_prompt() - Unknown prompt_lang `{prompt_lang}` (options: EN / EN2 / EN3 / ID / ID2 / ID3)')
# Path: data_utils.py
def load_xnli_dataset():
xnli_dataset = datasets.load_dataset('xtreme', 'XNLI')
df = xnli_dataset['test'].to_pandas()
xnli_dsets = {}
for lang, lang_df in df.groupby('language'):
lang_df = lang_df[['sentence1', 'sentence2', 'gold_label']]
lang_df.columns = ['text_1', 'text_2', 'label']
xnli_dsets[f'xnli_{lang}'] = DatasetDict({'test': Dataset.from_pandas(lang_df.reset_index(drop=True))})
return xnli_dsets
# Path: data_utils.py
def load_nusa_menulis_dataset():
nusa_menulis_dsets = {}
for (dset, task, lang) in NUSA_MENULIS_TASKS:
nusa_menulis_dsets[f'{dset}_{task}_{lang}'] = load_single_dataset(dset, task, lang, base_path='./nusamenulis')
return nusa_menulis_dsets
# Path: data_utils.py
def load_nlu_tasks():
conhelps = NusantaraConfigHelper()
nlu_datasets = {
helper.config.name: helper.load_dataset() for helper in conhelps.filtered(lambda x: x.config.name in TEXT_CLASSIFICATION_TASKS)
}
return nlu_datasets
# Path: main_nlu_prompt.py
import os, sys
import csv
import pandas as pd
import torch
import torch.nn.functional as F
from os.path import exists
from numpy import argmax
from tqdm import tqdm
from sklearn.metrics import f1_score, accuracy_score
from nlu_prompt import get_prompt
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
from nusacrowd import NusantaraConfigHelper
from data_utils import load_xnli_dataset, load_nusa_menulis_dataset, load_nlu_tasks
"""nusacrowd zero-shot prompt.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Ru8DyS2ALWfRdkjOPHj-KNjw6Pfa44Nd
"""
#!pip install git+https://github.com/IndoNLP/nusa-crowd.git@release_exp
#!pip install transformers
#!pip install sentencepiece
DEBUG=False
def to_prompt(input, prompt, labels, prompt_lang):
# single label
if 'text' in input:
prompt = prompt.replace('[INPUT]', input['text'])
else:
prompt = prompt.replace('[INPUT_A]', input['text_1'])
prompt = prompt.replace('[INPUT_B]', input['text_2'])
# replace [OPTIONS] to A, B, or C
if "[OPTIONS]" in prompt:
new_labels = [f'{l}' for l in labels]
new_labels[-1] = ("or " if 'EN' in prompt_lang else "atau ") + new_labels[-1]
if len(new_labels) > 2:
prompt = prompt.replace('[OPTIONS]', ', '.join(new_labels))
else:
prompt = prompt.replace('[OPTIONS]', ' '.join(new_labels))
return prompt
@torch.no_grad()
def get_logprobs(model, tokenizer, prompt, label_ids=None, label_attn=None):
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to('cuda')
input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:]
outputs = model(**inputs, labels=input_ids)
logits = outputs.logits
if model.config.is_encoder_decoder:
logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, label_ids.unsqueeze(2)) * label_attn.unsqueeze(2)
return logprobs.sum() / label_attn.sum()
else:
logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2))
return logprobs.mean()
def predict_classification(model, tokenizer, prompt, labels):
if model.config.is_encoder_decoder:
labels_encoded = tokenizer(labels, add_special_tokens=False, padding=True, return_tensors='pt')
list_label_ids =labels_encoded['input_ids'].to('cuda')
list_label_attn =labels_encoded['attention_mask'].to('cuda')
probs = [
get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', ''), label_ids.view(1,-1), label_attn.view(1,-1))
for (label_ids, label_attn) in zip(list_label_ids, list_label_attn)
]
else:
probs = [get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', label)) for label in labels]
return probs
if __name__ == '__main__':
if len(sys.argv) < 3:
raise ValueError('main_nlu_prompt.py <prompt_lang> <model_path_or_name> <optional_output_name>')
prompt_lang = sys.argv[1]
MODEL = sys.argv[2]
output_name = None
if len(sys.argv) == 4:
output_name = sys.argv[3]
os.makedirs('./outputs', exist_ok=True)
# Load Prompt
DATA_TO_PROMPT = get_prompt(prompt_lang)
# Load Dataset
print('Load NLU Datasets...')
| nlu_datasets = load_nlu_tasks() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ambient-innovation/django-migration-zero
# Path: django_migration_zero/exceptions.py
class InvalidMigrationTreeError(RuntimeError):
pass
# Path: django_migration_zero/managers.py
class MigrationZeroConfigurationManager(models.Manager):
def fetch_singleton(self) -> None:
logger = get_logger()
try:
number_records = self.count()
except ProgrammingError:
logger.warning(
"The migration zero table is missing. This might be ok for the first installation of "
'"django-migration-zero" but if you see this warning after that point, something went sideways.'
)
return None
if number_records > 1:
raise MissingMigrationZeroConfigRecordError(
"Too many configuration records detected. There can only be one."
)
config_singleton = self.all().first()
if not config_singleton:
raise MissingMigrationZeroConfigRecordError("No configuration record found in the database.")
return config_singleton
# Path: django_migration_zero/models.py
class MigrationZeroConfiguration(models.Model):
migration_imminent = models.BooleanField(
_("Migration imminent"),
default=False,
help_text=_("Enable this checkbox to prepare the database for a migration zero reset on the next deployment."),
)
migration_date = models.DateField(_("Migration date"), null=True, blank=True)
objects = MigrationZeroConfigurationManager()
class Meta:
verbose_name = _("Configuration")
verbose_name_plural = _("Configurations")
def __str__(self):
return "Configuration"
@property
def is_migration_applicable(self) -> bool:
"""
Checks if we are currently preparing for a "migration zero"-deployment
"""
logger = get_logger()
if not self.migration_imminent:
logger.info("Switch not active. Skipping migration zero process.")
return False
if not self.migration_date == timezone.now().date():
logger.info("Security date doesn't match today. Skipping migration zero process.")
return False
return True
# Path: django_migration_zero/services/deployment.py
class DatabasePreparationService:
"""
Service to prepare the database for an upcoming commit in the CI/CD pipeline.
"""
logger: Logger
def __init__(self):
super().__init__()
self.logger = get_logger()
def process(self):
self.logger.info("Starting migration zero database adjustments...")
# Fetch configuration singleton from database
config_singleton = MigrationZeroConfiguration.objects.fetch_singleton()
# If we encountered a problem or are not planning to do a migration reset, we are done here
if not (config_singleton and config_singleton.is_migration_applicable):
return
# Reset migration history in database for all apps because there might be dependency issues if we keep the
# records of the other ones
self.logger.info("Resetting migration history for all apps...")
with connections["default"].cursor() as cursor:
cursor.execute("DELETE FROM `django_migrations`")
# Apply migrations via fake because the database is already up-to-date
self.logger.info("Populating migration history.")
call_command("migrate", fake=True)
# Check if migration tree is valid
self.logger.info("Checking migration integrity.")
migrate_check = call_command("migrate", check=True)
if not migrate_check:
self.logger.info("All good.")
else:
raise InvalidMigrationTreeError(
'The command "migrate --check" returned a non-zero error code. '
"Your migration structure seems to be invalid."
)
# Process finished, deactivate migration zero switch
self.logger.info("Deactivating migration zero switch in database.")
config_singleton.migration_imminent = False
config_singleton.save()
self.logger.info("Process successfully finished.")
# Path: tests/services/test_deployment.py
from logging import Logger
from unittest import mock
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from django_migration_zero.exceptions import InvalidMigrationTreeError
from django_migration_zero.managers import MigrationZeroConfigurationManager
from django_migration_zero.models import MigrationZeroConfiguration
from django_migration_zero.services.deployment import DatabasePreparationService
@freeze_time("2023-06-26")
class DatabasePreparationServiceTest(TestCase):
config: MigrationZeroConfiguration
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.service = DatabasePreparationService()
cls.config, _ = MigrationZeroConfiguration.objects.get_or_create()
def test_init_logger_set(self):
self.assertIsInstance(self.service.logger, Logger)
def test_process_regular(self):
# Setup
self.config.migration_imminent = True
self.config.migration_date = timezone.now().date()
self.config.save()
# Assertions
self.assertIsNone(self.service.process())
self.config.refresh_from_db()
self.assertFalse(self.config.migration_imminent)
@mock.patch.object(MigrationZeroConfiguration, "is_migration_applicable", return_value=False)
| @mock.patch.object(MigrationZeroConfigurationManager, "fetch_singleton", return_value=None) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Lucchetto/model_converter
# Path: src/licensing.py
def setup_pub_key() -> (rsa.RSAPublicKey | None):
str = os.environ.get('LICENSING_PUB_KEY')
if str:
logging.info("LICENSING_PUB_KEY defined, Play Store licensing validation will be performed")
key = serialization.load_der_public_key(
base64.b64decode(str),
backend=default_backend()
)
# Check if the key is an instance of RSA public key
if isinstance(key, rsa.RSAPublicKey):
return key
else:
raise ValueError("The key is not an RSA public key.")
else:
logging.info("LICENSING_PUB_KEY not defined, no licensing validation will be performed")
return None
# Path: src/licensing.py
def validate_license(key: rsa.RSAPublicKey, licensing_response_data: str | None, signature: str | None) -> bool:
"""Validates license response from Play Store
"""
# Extract license data from response
if licensing_response_data is None:
return False
license_data = licensing_response_data.split("|")
if len(license_data) < 6:
return False
license_status = LicensingStatus.from_value(safe_str_to_int(license_data[0]))
package_name = license_data[2]
# Remove extra data separated with |{timestamp}:{extra_data}
timestamp = int(license_data[5].split(":")[0])
# License responses with old timestamp should considered invalid too
if (timestamp + LICENSE_RESPONSE_VALIDITY_TIME < datetime.now(timezone.utc).timestamp() * 1000):
return False
# Checking signature is not necessary if not licensed or if licensed check signature is provided
if license_status is not LicensingStatus.LICENSED or signature is None:
return False
# Verify reponse data integrity if status is licensed
try:
key.verify(
base64.b64decode(signature),
licensing_response_data.encode(),
padding.PKCS1v15(),
hashes.SHA1()
)
return True
except InvalidSignature as e:
return False
# Path: src/converter.py
class UnsupportedModelArch(Exception):
pass
# Path: src/converter.py
def convert_pth_to_onnx(input_model: str, output_model: str):
(model, _, _) = load_model_node(input_model)
if model.__class__.__name__ in (DAT.__name__, CodeFormer.__name__, GFPGANv1Clean.__name__, RestoreFormer.__name__):
raise UnsupportedModelArch()
# set the train mode to false since we will only run the forward pass.
model.train(False)
model.cpu().eval()
# An example input
x = torch.rand(1, model.in_nc, 256, 256)
# Export the model
with torch.no_grad():
dynamic_axes = {'input':{0:'batch_size' , 2:'width', 3:'height'}, 'output':{0:'batch_size' , 2:'width', 3:'height'}}
torch.onnx.export(
model,
x,
output_model,
opset_version=11,
export_params=True,
input_names = ['input'],
output_names = ['output'],
dynamic_axes=dynamic_axes)
# Path: src/api.py
from enum import Enum
from flask import Flask, Response, jsonify, request, send_file
from src.licensing import setup_pub_key, validate_license
from .converter import UnsupportedModelArch, convert_pth_to_onnx
import logging
import os
import uuid
class ApiErrorReason(Enum):
UNSUPPORTED_ARCH = "UNSUPPORTED_ARCH"
INVALID_LICENSE = 'INVALID_LICENSE'
UNSUPPORTED_FORMAT = 'UNSUPPORTED_FORMAT'
UNKNOWN = 'UNKNOWN'
def api_error(reason: ApiErrorReason):
if reason == ApiErrorReason.INVALID_LICENSE:
status_code = 401
else:
status_code = 400
return jsonify({"reason": reason.value}), status_code
def create_app():
logging.basicConfig(level=logging.NOTSET)
app = Flask(__name__)
# Ensure the directory exists
os.makedirs("tmp", exist_ok=True)
| pub_key = setup_pub_key() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hpsaturn/pilauncher
# Path: gui.py
class GuiManager():
def __init__(self):
self.am = AppManager()
self.wlevel = 0
self.showApp()
def showApp(self):
if self.wlevel == 0:
print(self.am.getCurrentApp().name)
return self.am.getCurrentApp().name
else:
print(self.am.getCurrentCmd().name)
return self.am.getCurrentCmd().name
def showNextApp(self):
if self.wlevel == 0:
self.am.getNextApp()
else:
self.am.getNextCmd()
return self.showApp()
def getAppStatusCmd(self):
if self.wlevel == 0:
return self.am.getCurrentApp().sta_cmd
else:
return None
def getAppStatus(self):
if self.wlevel == 0:
return self.am.getCurrentApp().status
else:
return ''
def runBack(self):
self.wlevel=0
self.am.reset()
return self.showApp()
def runAction(self):
if self.wlevel==0:
self.wlevel=1
return self.showApp()
else:
command = self.am.getCurrentCmd().command
if command == 'back':
return self.runBack()
else:
return 'exec::'+command
def getConfig(self):
return self.am.cfg
# Path: display.py
class Display:
WIDTH = 128
HEIGHT = 32 # Change to 64 if needed
def __init__(self):
# Create the I2C interface.
i2c = board.I2C()
# Define the Reset Pin
oled_reset = digitalio.DigitalInOut(board.D4)
self.disp = adafruit_ssd1306.SSD1306_I2C(self.WIDTH, self.HEIGHT, i2c, addr=0x3C, reset=oled_reset)
# timer for auto disp off
self.timer_screen = time.time()
# general semaphore
self.mutex = threading.Lock()
# Clear display.
self.disp.fill(0)
self.disp.show()
# Create blank image for drawing.
self.w = self.disp.width
self.h = self.disp.height
self.image = Image.new("1", (self.w, self.h))
# Get drawing object to draw on image.
self.draw = ImageDraw.Draw(self.image)
# Draw a black filled box to clear the image.
self.draw.rectangle((0, 0, self.w, self.h), outline=0, fill=0)
#padding
self.top = -2
self.x = 0
# Load default font.
self.fntS = ImageFont.load_default()
self.fntB = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 18)
def showString(self, msg):
self.mutex.acquire()
self.draw.rectangle((0, 0, self.w, self.h), outline=0, fill=0)
self.draw.text((self.x, self.top + 0), msg[:12], font=self.fntB, fill=255)
self.disp.image(self.image)
self.disp.show()
self.mutex.release()
def showStatus(self, msg):
self.mutex.acquire()
self.draw.rectangle((0, 18, self.w-1, self.h- 1), outline=0, fill=0)
self.draw.text((self.x, self.top + 25), msg[:21], font=self.fntS, fill=255)
self.disp.image(self.image)
self.disp.show()
self.mutex.release()
def showInfoLines(self, lines):
self.mutex.acquire()
self.draw.rectangle((0, 0, self.w, self.h), outline=0, fill=0)
pos = 0
for line in lines:
self.draw.text((self.x, self.top + pos), line[:21], font=self.fntS, fill=255)
pos = pos + 8
self.disp.image(self.image)
self.disp.show()
self.mutex.release()
def powerOffTimerReset(self):
self.timer_screen = time.time()
if not self.disp.power:
self.disp.poweron()
def powerOffTimerLoop(self, time_off):
if not self.disp.power:
return
if time.time() - self.timer_screen > time_off:
self.disp.poweroff()
# Path: main.py
import time
import subprocess
import threading
import RPi.GPIO as GPIO
from gui import GuiManager
from display import Display
BTNLFT = 23
BTNRGT = 6
onAppStatusTask = False
onSystemStatsTask = False
isBtnRgtPresed = False
isBtnLftPresed = False
onStats = False
# GUI Apps Manager
gui = GuiManager()
cfg = gui.getConfig()
| dsp = Display() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CAMeL-Lab/camel_parser
# Path: src/logger.py
def log(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
with open(log_path, 'a') as f:
f.write(f'{map_function_to_phrase(func.__name__)}: {round(end_time - start_time, 2)}s\n')
return result
except Exception as e:
logger.exception(f"Exception raised in {func.__name__}. exception: {str(e)}")
raise e
return wrapper
# Path: src/initialize_disambiguator/bert_disambiguator.py
def create_bert_disambiguator(analyzer):
model = BERTUnfactoredDisambiguator.pretrained("msa", top=1000, pretrained_cache=False)
model._analyzer = analyzer
return model
# Path: src/initialize_disambiguator/mle_disambiguator.py
class MLEDisambiguatorAdapter():
def __init__(self, analyzer: Analyzer):
self.disambiguator = MLEDisambiguator(analyzer=analyzer)
# def pretrained(self, analyzer):
# self.disambiguator = self.disambiguator
def disambiguate(self, sentence: List[str]) -> List[DisambiguatedWord]:
return self.disambiguator.disambiguate(sentence)
def disambiguate_sentences(self, lines: List[List[str]]) -> List[List[DisambiguatedWord]]:
return [self.disambiguator.disambiguate(line) for line in lines]
# Path: src/initialize_disambiguator/disambiguator_interface.py
from typing import Union
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.disambig.bert import BERTUnfactoredDisambiguator
from src.logger import log
from src.initialize_disambiguator.bert_disambiguator import create_bert_disambiguator
from src.initialize_disambiguator.mle_disambiguator import MLEDisambiguatorAdapter
def set_up_analyzer(morphology_db: str) -> Analyzer:
# used to initialize an Analyzer with ADD_PROP backoff
# db = MorphologyDB.builtin_db('calima-msa-s31')
db_type = None if morphology_db == 'r13' else morphology_db
db = MorphologyDB.builtin_db(db_name=db_type)
return Analyzer(db=db, backoff='ADD_PROP', cache_size=100000)
@log
def get_disambiguator(model_name: str, morphology_db: str) -> Union[MLEDisambiguatorAdapter, BERTUnfactoredDisambiguator]:
analyzer = set_up_analyzer(morphology_db)
if model_name == 'mle':
model = MLEDisambiguatorAdapter(analyzer)
elif model_name == 'bert':
| model = create_bert_disambiguator(analyzer) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JerBouma/FinancePortfolio
# Path: financeportfolio/excel_model.py
def create_portfolio_performance_excel_report(
writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = "$"
):
def create_transactions_performance_excel_report(
writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = "$"
):
def create_portfolio_overview_excel_report(
writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = "$"
):
def create_positions_overview_excel_report(
writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = "$"
):
# Path: financeportfolio/helpers.py
BASE_URL = "https://raw.githubusercontent.com/JerBouma/FinancePortfolio/main/"
VALID_CODE = 200
RED = "\033[91m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
BLUE = "\033[94m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
RESET = "\033[0m"
class Style:
def read_excel(location: str):
def read_yaml_file(location: str):
def download_example_datasets(base_url: str | None = None):
def download_yaml_configuration(example: bool = False, name: str | None = None):
# Path: financeportfolio/portfolio_model.py
CURRENCY_CODE_LENGTH = 3
def read_portfolio_dataset(
excel_location: list,
adjust_duplicates: bool,
date_column: list[str],
date_format: str,
name_columns: list[str],
ticker_columns: list[str],
price_columns: list[str],
volume_columns: list[str],
column_mapping: dict[str, str],
currency_columns: list[str] | str | None = None,
costs_columns: list[str] | None = None,
) -> tuple[pd.DataFrame, str, str, str, str, str, str]:
def format_portfolio_dataset(
dataset: pd.DataFrame,
date_columns: list[str],
date_format: str,
name_columns: list[str],
tickers_columns: list[str],
price_columns: list[str],
volume_columns: list[str],
column_mapping: dict[str, str],
currency_columns: list[str] | str | None = None,
costs_columns: list[str] | None = None,
) -> tuple[pd.DataFrame, str, str, str, str, str, str, str]:
def create_transactions_overview(
portfolio_volume: pd.Series,
portfolio_price: pd.Series,
portfolio_costs: pd.Series,
latest_returns: pd.Series,
):
def create_portfolio_overview(
portfolio_name: pd.Series,
portfolio_volume: pd.Series,
portfolio_price: pd.Series,
portfolio_costs: pd.Series,
latest_returns: pd.Series,
benchmark_prices: pd.Series,
benchmark_latest_prices: pd.Series,
):
def create_transactions_performance(
portfolio_dataset: pd.DataFrame,
ticker_column: str,
date_column: str,
volume_column: str,
price_column: str,
costs_column: str,
period_prices: pd.DataFrame,
period_string: str,
original_ticker_combinations: dict,
benchmark_per_ticker: dict,
benchmark_specific_prices: pd.Series,
benchmark_period_prices: pd.DataFrame,
):
def create_positions_overview(
portfolio_tickers: list[str],
period_dates: pd.DatetimeIndex,
portfolio_dataset: pd.DataFrame,
historical_prices: pd.Series,
columns: list[str] | None = None,
):
def create_portfolio_performance(
positions_dataset: pd.DataFrame,
date_column: str,
ticker_column: str,
period_string: str,
):
# Path: financeportfolio/portfolio_controller.py
import pandas as pd
from financetoolkit import Toolkit
from financeportfolio import excel_model, helpers, portfolio_model
"""Portfolio Module"""
# pylint: disable=too-many-instance-attributes,abstract-class-instantiated,
# pylint: disable=too-few-public-methods,protected-access,too-many-lines
class Portfolio:
"""
A class for managing and analyzing your portfolio.
This class provides functionality for loading, preprocessing, categorizing, and analyzing
cash flow data based on a specified configuration file. It offers methods to read and format
the dataset, apply cost or income indicators, categorize transactions, and create periodical
cash flow overviews.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format. The
configuration file should define various settings and columns used in cash flow
analysis.
Attributes:
_configuration_file (str): The file path to the configuration file.
_cash_flow_dataset (pd.DataFrame): The cash flow dataset as a pandas DataFrame.
Note:
- The configuration file should be in YAML format and contain settings for date columns,
description columns, amount columns, and optionally cost/income columns.
- Initialize an instance of this class to begin cash flow analysis.
"""
def __init__(
self,
configuration_file: str | None = None,
portfolio_dataset: pd.DataFrame = pd.DataFrame(),
example: bool = False,
):
"""
Initialize a Cashflow instance with the provided configuration file.
This constructor sets up the Cashflow instance by loading the configuration file, defining
default attributes, and initializing the cash flow dataset as an empty DataFrame.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format.
Raises:
ValueError: If the provided configuration file does not have a '.yaml' extension.
Only '.yaml' configuration files are supported.
"""
if example:
| configuration_file = helpers.download_yaml_configuration(example=True)
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: S2-group/UPISAS
# Path: UPISAS/exceptions.py
class DockerImageNotFoundOnDockerHub(UPISASException):
pass
# Path: UPISAS/exemplar.py
class Exemplar(ABC):
"""
A class which encapsulates a self-adaptive exemplar run in a docker container.
"""
_container_name = ""
def __init__(self, base_endpoint: "string with the URL of the exemplar's HTTP server", \
docker_kwargs,
auto_start: "Whether to immediately start the container after creation" =False,
):
'''Create an instance of the Exemplar class'''
self.base_endpoint = base_endpoint
image_name = docker_kwargs["image"]
image_owner = image_name.split("/")[0]
try:
docker_client = docker.from_env()
try:
docker_client.images.get(image_name)
logging.info(f"image '{image_name}' found locally")
except docker.errors.ImageNotFound:
logging.info(f"image '{image_name}' not found locally")
images_from_owner = docker_client.images.search(image_owner)
if image_name.split(":")[0] in [i["name"] for i in images_from_owner]:
logging.info(f"image '{image_name}' found on DockerHub, pulling it")
with Progress() as progress:
for line in docker_client.api.pull(image_name, stream=True, decode=True):
show_progress(line, progress)
else:
logging.error(f"image '{image_name}' not found on DockerHub, exiting!")
raise DockerImageNotFoundOnDockerHub
docker_kwargs["detach"] = True
self.exemplar_container = docker_client.containers.create(**docker_kwargs)
except DockerException as e:
# TODO: Properly catch various errors. Currently, a lot of errors might be caught here.
# Please check the logs if that happens.
raise e
if auto_start:
self.start_container()
@abstractmethod
def start_run(self):
pass
def start_container(self):
'''Starts running the docker container made from the given image when constructing this class'''
try:
container_status = self.get_container_status()
if container_status == "running":
logging.warning("container already running...")
else:
logging.info("starting container...")
self.exemplar_container.start()
return True
except docker.errors.NotFound as e:
logging.error(e)
def stop_container(self, remove=True):
'''Stops the docker container made from the given image when constructing this class'''
try:
container_status = self.get_container_status()
if container_status == "exited":
logging.warning("container already stopped...")
if remove:
self.exemplar_container.remove()
self.exemplar_container = None
else:
logging.info("stopping container...")
self.exemplar_container.stop()
if remove:
self.exemplar_container.remove()
self.exemplar_container = None
return True
except docker.errors.NotFound as e:
logging.warning(e)
logging.warning("cannot stop container")
def pause_container(self):
'''Pauses a running docker container made from the given image when constructing this class'''
try:
container_status = self.get_container_status()
if container_status == "running":
logging.info("pausing container...")
self.exemplar_container.pause()
return True
elif container_status == "paused":
logging.warning("container already paused...")
return True
else:
logging.warning("cannot pause container since it's not running")
return False
except docker.errors.NotFound as e:
logging.error(e)
logging.error("cannot pause container")
def unpause_container(self):
'''Resumes a paused docker container made from the given image when constructing this class'''
try:
container_status = self.get_container_status()
if container_status == "paused":
logging.info("unpausing container...")
self.exemplar_container.unpause()
return True
elif container_status == "running":
logging.warning("container already running (why unpause it?)...")
return True
else:
logging.warning("cannot unpause container since it's not paused")
return False
except docker.errors.NotFound as e:
logging.warning(e)
logging.warning("cannot unpause container")
def get_container_status(self):
if self.exemplar_container:
self.exemplar_container.reload()
return self.exemplar_container.status
return "removed"
# Path: UPISAS/exemplars/demo_exemplar.py
class DemoExemplar(Exemplar):
"""
A class which encapsulates a self-adaptive exemplar run in a docker container.
"""
def __init__(self, auto_start=False, container_name="upisas-demo"):
docker_config = {
"name": container_name,
"image": "iliasger/upisas-demo-managed-system",
"ports" : {3000: 3000}}
super().__init__("http://localhost:3000", docker_config, auto_start)
def start_run(self, app):
self.exemplar_container.exec_run(cmd = f' sh -c "cd /usr/src/app && node {app}" ', detach=True)
# Path: UPISAS/tests/upisas/test_exemplar.py
import unittest
from UPISAS.exceptions import DockerImageNotFoundOnDockerHub
from UPISAS.exemplar import Exemplar
from UPISAS.exemplars.demo_exemplar import DemoExemplar
class TestExemplar(unittest.TestCase):
"""
Test cases for the Exemplar class using the DemoExemplar.
"""
def setUp(self):
self.exemplar = None
def tearDown(self):
if self.exemplar and self.exemplar.exemplar_container:
self.exemplar.stop_container()
def test_init_successfully_wihout_auto_start(self):
| self.exemplar = DemoExemplar(auto_start=False) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: developerlin/excelchat-streamlit
# Path: middleware/base.py
class CustomChartsMiddleware(ChartsMiddleware):
def run(self, code: str) -> str:
# code = super().run(code)
processed = []
for line in code.split("\n"):
if line.find("plt.close()") != -1:
idx = line.find("plt")
blank = "".join([' ' for c in range(idx)])
# Fix the chinese character display issue
processed.append(blank + "plt.rcParams['font.sans-serif']=['SimHei']")
processed.append(blank + "plt.rcParams['axes.unicode_minus']=False")
# processed.append(blank + "plt.savefig('temp_chart.png')")
processed.append(line)
else:
processed.append(line)
code = "\n".join(processed)
return code
# Path: parser/response_parser.py
class CustomResponseParser(ResponseParser):
def format_plot(self, result: dict) -> Any:
super().format_plot(result)
filename = str(uuid.uuid4()).replace("-", "")
temp_image_path = Path(f"{tempfile.tempdir}/streamlit/{filename}.png")
temp_image_path.parent.mkdir(parents=True, exist_ok=True)
original_path = Path("temp_chart.png")
shutil.copy(original_path, temp_image_path)
print("image created: ", str(temp_image_path))
return {"type": "plot", "value": str(temp_image_path)}
# Path: util.py
def get_open_ai_model(api_key):
return OpenAI(api_token=api_key)
# Path: util.py
def get_ollama_model(model_key, base_url):
llm = Ollama(model=model_key, base_url=base_url, verbose=True)
return LangchainLLM(langchain_llm=llm)
# Path: util.py
def get_baidu_as_model(access_token):
llm_core = AIStudioErnieBot(access_token=access_token, verbose=True)
return LangchainLLM(llm_core)
# Path: util.py
def get_prompt_template():
instruction_template = """
使用提供的 dataframes ('dfs') 分析这个数据,过程中不要调用 dataframe set_index 对数据排序.
1. 准备: 如果有必要对数据做预处理和清洗
2. 执行: 对数据进行数据分析操作 (grouping, filtering, aggregating, etc.)
3. 分析: 进行实际分析(如果用户要求plot chart,请在代码中添加如下两行代码设置字体, 并将结果保存为图像文件temp_chart.png,并且不显示图表)
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
"""
custom_template = GeneratePythonCodePrompt(custom_instructions=instruction_template)
return custom_template
# Path: util.py
def get_baidu_qianfan_model(client_id, client_secret):
llm_core = ErnieBotChat(
model_name="ERNIE-Bot",
temperature=0.1,
ernie_client_id=client_id,
ernie_client_secret=client_secret
)
return LangchainLLM(llm_core)
# Path: Home.py
import io
import logging
import uuid
import matplotlib
import pandas as pd
import streamlit as st
from pathlib import Path
from typing import Dict
from pandasai import SmartDataframe, Agent, Config
from pandasai.callbacks import StdoutCallback
from pandasai.helpers import Logger
from middleware.base import CustomChartsMiddleware
from parser.response_parser import CustomResponseParser
from util import get_open_ai_model, get_ollama_model, get_baidu_as_model, get_prompt_template, get_baidu_qianfan_model
logger = Logger()
matplotlib.rc_file("./.matplotlib/.matplotlibrc");
# page settings
st.set_page_config(page_title="Excel Chat", layout="wide")
st.header("What ExcelChat can do?")
st.text("ExcelChat is a lightweight data analysis app powered by LLM, showcasing how LLM can revolutionize the future"
"of data analysis.")
st.markdown("""List of todos
- [x] Add memory
- [x] Support non-latin text in chart
- [ ] Sub questions support
""")
class AgentWrapper:
id: str
agent: Agent
def __init__(self) -> None:
self.agent = None
self.id = str(uuid.uuid4())
def get_llm(self):
op = st.session_state.last_option
llm = None
if op == "Ollama":
llm = get_ollama_model(st.session_state.ollama_model, st.session_state.ollama_base_url)
elif op == "OpenAI":
if st.session_state.api_token != "":
llm = get_open_ai_model(st.session_state.api_token)
elif op == "Baidu/AIStudio-Ernie-Bot":
if st.session_state.access_token != "":
llm = get_baidu_as_model(st.session_state.access_token)
elif op == "Baidu/Qianfan-Ernie-Bot":
if st.session_state.client_id != "" and st.session_state.client_secret != "":
llm = get_baidu_qianfan_model(st.session_state.client_id, st.session_state.client_secret)
if llm is None:
st.toast("LLM initialization failed, check LLM configuration", icon="🫤")
return llm
def set_file_data(self, df):
llm = self.get_llm()
if llm is not None:
print("llm.type", llm.type)
config = Config(
llm=llm,
callback=StdoutCallback(),
# middlewares=[CustomChartsMiddleware()],
response_parser=CustomResponseParser,
custom_prompts={
| "generate_python_code": get_prompt_template() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ZiaWang/jqtrade
# Path: jqtrade/account/order.py
class OrderSide(Enum):
# 多仓
long = "long"
# 空仓
short = "short"
@classmethod
def is_valid_side(cls, side):
return side in cls.__members__
@classmethod
def get_side(cls, side):
if isinstance(side, cls):
return side
try:
return cls.__members__[side]
except KeyError:
raise ValueError(f"invalid side: {side}")
# Path: jqtrade/account/api.py
class UserPosition(object):
def __init__(self, sys_position):
self.__position = sys_position
@classmethod
def get_empty_pos(cls, code, side):
return UserPosition(Position(code, 0, 0, 0, side, position_value=0, last_price=0))
@property
def security(self):
return self.__position.code
@property
def total_amount(self):
return self.__position.amount
@property
def closeable_amount(self):
return self.__position.available_amount
@property
def avg_cost(self):
return self.__position.avg_cost
acc_avg_cost = avg_cost
@property
def side(self):
if self.__position.side:
return self.__position.side.value
@property
def last_price(self):
return self.__position.last_price
price = last_price
@property
def position_value(self):
return self.__position.position_value
value = position_value
def __str__(self):
return f"UserPosition(security={self.security}, total_amount={self.total_amount}, " \
f"closeable_amount={self.closeable_amount}, avg_cost={self.avg_cost}, side={self.side}, " \
f"last_price={self.last_price}, position_value={self.position_value})"
# Path: jqtrade/account/api.py
class UserPositionDict(dict):
def __init__(self, side, *args, **kwargs):
super(UserPositionDict, self).__init__(*args, **kwargs)
self._side = side
def __getitem__(self, code):
try:
return dict.__getitem__(self, code)
except KeyError:
sys_logger.warn(f"{code} 在 positions 中不存在,我们返回空的 Position 对象, "
f"total_amount/closeable_amount/avg_cost/acc_avg_cost/position_value/last_price 都是 0")
return UserPosition.get_empty_pos(code, side=self._side)
# Path: jqtrade/account/portfolio.py
from .order import OrderSide
from .api import UserPosition, UserPositionDict
# -*- coding: utf-8 -*-
class Portfolio(object):
""" 账户资金/持仓信息聚合类 """
def __init__(self, account):
self.__account = account
@property
def long_positions(self):
| positions = UserPositionDict(OrderSide.long) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Glasgow-AI4BioMed/GenKIE
# Path: data/data_utils.py
def infer_language_pair(path):
def collate_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
pad_to_bsz=None,
):
def copy_tensor(src, dst):
def load_indexed_dataset(
path, dictionary=None, dataset_impl=None, combine=False, default="cached"
):
def numpy_seed(seed, *addl_seeds):
def collect_filtered(function, iterable, filtered):
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def compare_leq(a, b):
def check_size(idx):
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):
def batch_by_size(
indices,
num_tokens_fn,
num_tokens_vec=None,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
fixed_shapes=None,
):
def post_process(sentence: str, symbol: str):
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
def arrange(s, e, length, keep_length):
def get_mem_usage():
def lengths_to_padding_mask(lens):
def lengths_to_mask(lens):
def get_buckets(sizes, num_buckets):
def get_bucketed_sizes(orig_sizes, buckets):
def _find_extra_valid_paths(dataset_path: str) -> set:
def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None:
# Path: data/ofa_dataset.py
class OFADataset(FairseqDataset):
def __init__(self, split, dataset, bpe, src_dict, tgt_dict):
self.split = split
self.dataset = dataset
self.bpe = bpe
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.bos = src_dict.bos()
self.eos = src_dict.eos()
self.pad = src_dict.pad()
self.bos_item = torch.LongTensor([self.bos])
self.eos_item = torch.LongTensor([self.eos])
def __len__(self):
return len(self.dataset)
def encode_text(self, text, length=None, append_bos=False, append_eos=False, use_bpe=True):
s = self.tgt_dict.encode_line(
line=self.bpe.encode(text) if use_bpe else text,
add_if_not_exist=False,
append_eos=False
).long()
if length is not None:
s = s[:length]
if append_bos:
s = torch.cat([self.bos_item, s])
if append_eos:
s = torch.cat([s, self.eos_item])
return s
def pre_question(self, question, max_ques_words=None):
question = question.lower().lstrip(",.!?*#:;~").replace('-', ' ').replace('/', ' ')
question = re.sub(
r"\s{2,}",
' ',
question,
)
question = question.rstrip('\n')
question = question.strip(' ')
# truncate question
question_words = question.split(' ')
if max_ques_words is not None and len(question_words) > max_ques_words:
question = ' '.join(question_words[:max_ques_words])
return question
def pre_caption(self, caption, max_words=None):
caption = caption.lower().lstrip(",.!?*#:;~").replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
caption = re.sub(
r"\s{2,}",
' ',
caption,
)
caption = caption.rstrip('\n')
caption = caption.strip(' ')
# truncate caption
caption_words = caption.split(' ')
if max_words is not None and len(caption_words) > max_words:
caption = ' '.join(caption_words[:max_words])
return caption
# Path: data/mm_data/vqa_gen_dataset.py
from io import BytesIO
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
import logging
import warnings
import numpy as np
import torch
import base64
# Copyright 2022 The OFA-Sys Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
ImageFile.LOAD_TRUNCATED_IMAGES = True
ImageFile.MAX_IMAGE_PIXELS = None
Image.MAX_IMAGE_PIXELS = None
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def collate(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key):
| return data_utils.collate_tokens( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ArnaudParant/sel
# Path: scripts/elastic.py
def options():
def create_index(filepath, schema_filepath, index, overwrite=False):
def _delete_index(elastic, index):
def loads_ndjson(fd):
def insert(elastic, index, data):
def _create_index(elastic, index, schema_filepath):
def load_schema(filepath):
def elastic_connect():
# Path: sel/utils.py
class InternalServerError(Exception):
class InvalidClientInput(Exception):
class NotFound(Exception):
def __init__(self, message):
def __str__(self):
def __init__(self, message):
def __str__(self):
def __init__(self, message):
def __str__(self):
def set_if_exists(source, dest, keys):
def build_group(operator, items):
def get_lastest_sub_data(data):
def _detailor(exc):
def elastic_exception_detailor(handler):
def handler_wrapper(*args, **kwargs):
# Path: tests/test_sel.py
import pytest
import json
import test_utils
from scripts import elastic
from sel import utils
TEST_INDEX_FILE = "/tests/data/sample_2017.json"
TEST_SCHEMA_FILE = "/scripts/schema.json"
TEST_INDEX = "test_index"
class TestSEL:
@pytest.fixture(scope="function", autouse=True)
def init(self):
elastic.create_index(TEST_INDEX_FILE, TEST_SCHEMA_FILE, TEST_INDEX, overwrite=True)
def __cleaner(self, obj):
if "_score" in obj:
del obj["_score"]
return obj
@pytest.mark.parametrize(["query"], [
[{}],
[{"meta": {"size": 100}}],
[{"meta": {"size": 5}}],
])
def test_scroll(self, sel, query):
with open(TEST_INDEX_FILE, "r") as f:
expected_lines = {d["id"]: d for d in load_ndjson(f)}
documents = []
scroll_id = None
while True:
res = sel.scroll(TEST_INDEX, query, "1m", scroll_id=scroll_id)
documents += res["documents"]
scroll_id = res["scroll_id"]
if not len(res["documents"]):
break
sel.clear_scroll(res["scroll_id"])
found = {}
for line in documents:
j = self.__cleaner(line)
found[j["id"]] = j
for j2 in expected_lines.values():
j = found.get(j2["id"])
j2["_index"] = TEST_INDEX
assert test_utils.dict_equals(j, j2), f"Got: {j}\nExpected: {j2}"
size = len(found)
file_size = len(expected_lines)
assert size == file_size, f"Download line {size} != {file_size}"
@pytest.mark.parametrize(["query"], [
[{"aggregations": {"labels": {"field": "label"}}}],
[{"aggregations": {"ids": {"field": ".id"}}}],
])
def test_download_aggreg(self, sel, query):
def sort_aggreg(aggreg):
aggreg = sorted(aggreg, key=lambda o: o["key"])
return sorted(aggreg, key=lambda o: o["doc_count"], reverse=True)
aggreg_key = list(query["aggregations"].keys())[0]
query["aggregations"][aggreg_key]["size"] = 0
base_aggreg = {"field": "date", "interval": "week"}
res = sel.search(TEST_INDEX, query)
| expected = utils.get_lastest_sub_data(res["results"]["aggregations"][aggreg_key])["buckets"] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Qualcomm-AI-research/outlier-free-transformers
# Path: quantization/quantizers/base_quantizers.py
class QuantizerBase(nn.Module):
def __init__(self, n_bits, *args, per_channel=False, act_quant=False, **kwargs):
super().__init__(*args, **kwargs)
self.n_bits = n_bits
self.act_quant = act_quant
self.per_channel = per_channel
self.state = None
self.x_min_fp32 = self.x_max_fp32 = None
@property
def is_initialized(self):
raise NotImplementedError()
@property
def x_max(self):
raise NotImplementedError()
@property
def symmetric(self):
raise NotImplementedError()
@property
def x_min(self):
raise NotImplementedError()
def forward(self, x_float):
raise NotImplementedError()
def _adjust_params_per_channel(self, x):
raise NotImplementedError()
def set_quant_range(self, x_min, x_max):
raise NotImplementedError()
def extra_repr(self):
return "n_bits={}, per_channel={}, is_initalized={}".format(
self.n_bits, self.per_channel, self.is_initialized
)
def reset(self):
self._delta = None
def fix_ranges(self):
raise NotImplementedError()
def make_range_trainable(self):
raise NotImplementedError()
# Path: quantization/quantizers/quantizer_utils.py
class RoundStraightThrough(Function):
class ScaleGradient(Function):
class QuantizerNotInitializedError(Exception):
def forward(ctx, x):
def backward(ctx, output_grad):
def forward(ctx, x, scale):
def backward(ctx, output_grad):
def __init__(self):
# Path: quantization/quantizers/uniform_quantizers.py
import torch
from quantization.quantizers.base_quantizers import QuantizerBase
from quantization.quantizers.quantizer_utils import (
QuantizerNotInitializedError,
round_ste_func,
scale_grad_func,
)
# Copyright (c) 2023 Qualcomm Technologies, Inc.
# All Rights Reserved.
class AsymmetricUniformQuantizer(QuantizerBase):
"""
PyTorch Module that implements Asymmetric Uniform Quantization using STE.
Quantizes its argument in the forward pass, passes the gradient 'straight
through' on the backward pass, ignoring the quantization that occurred.
Parameters
----------
n_bits: int
Number of bits for quantization.
scale_domain: str ('log', 'linear) with default='linear'
Domain of scale factor
per_channel: bool
If True: allows for per-channel quantization
"""
def __init__(self, n_bits, scale_domain="linear", grad_scaling=False, eps=1e-8, **kwargs):
super().__init__(n_bits=n_bits, **kwargs)
assert scale_domain in ("linear", "log")
self.register_buffer("_delta", None)
self.register_buffer("_zero_float", None)
self.scale_domain = scale_domain
self.grad_scaling = grad_scaling
self.eps = eps
# A few useful properties
@property
def delta(self):
if self._delta is not None:
return self._delta
else:
raise QuantizerNotInitializedError()
@property
def zero_float(self):
if self._zero_float is not None:
return self._zero_float
else:
raise QuantizerNotInitializedError()
@property
def is_initialized(self):
return self._delta is not None
@property
def symmetric(self):
return False
@property
def int_min(self):
# integer grid minimum
return 0.0
@property
def int_max(self):
# integer grid maximum
return 2.0**self.n_bits - 1
@property
def scale(self):
if self.scale_domain == "linear":
return torch.clamp(self.delta, min=self.eps)
elif self.scale_domain == "log":
return torch.exp(self.delta)
@property
def zero_point(self):
| zero_point = round_ste_func(self.zero_float) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: QgZhan/ESVAE
# Path: utils.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# Path: utils.py
class aboutCudaDevices():
def __init__(self):
pass
def num_devices(self):
"""Return number of devices connected."""
return cuda.Device.count()
def devices(self):
"""Get info on all devices connected."""
num = cuda.Device.count()
print("%d device(s) found:" % num)
for i in range(num):
print(cuda.Device(i).name(), "(Id: %d)" % i)
def mem_info(self):
"""Get available and total memory of all devices."""
available, total = cuda.mem_get_info()
print("Available: %.2f GB\nTotal: %.2f GB" % (available / 1e9, total / 1e9))
def attributes(self, device_id=0):
"""Get attributes of device with device Id = device_id"""
return cuda.Device(device_id).get_attributes()
def info(self):
"""Class representation as number of devices connected and about them."""
num = cuda.Device.count()
string = ""
string += ("%d device(s) found:\n" % num)
for i in range(num):
string += (" %d) %s (Id: %d)\n" % ((i + 1), cuda.Device(i).name(), i))
string += (" Memory: %.2f GB\n" % (cuda.Device(i).total_memory() / 1e9))
return string
# Path: datasets/load_dataset_ann.py
def load_mnist(data_path, batch_size):
def load_fashionmnist(data_path,batch_size):
def load_celeba(data_path,batch_size):
def load_cifar10(data_path,batch_size):
# Path: main_ann_ae.py
import os
import os.path
import numpy as np
import logging
import argparse
import pycuda.driver as cuda
import torch
import torchvision
import models.ann_ae as ann_ae
from torch.nn.utils import clip_grad_norm_
from torch.nn.utils import clip_grad_value_
from torch.utils.tensorboard import SummaryWriter
from utils import AverageMeter
from utils import aboutCudaDevices
from datasets import load_dataset_ann
max_accuracy = 0
min_loss = 1000
def train(network, trainloader, opti, epoch):
| loss_meter = AverageMeter()
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: iesl/softmax_CPR_recommend
# Path: recbole/model/abstract_recommender.py
class SequentialRecommender(AbstractRecommender):
"""
This is a abstract sequential recommender. All the sequential model should implement This class.
"""
type = ModelType.SEQUENTIAL
def __init__(self, config, dataset):
super(SequentialRecommender, self).__init__()
# load dataset info
self.USER_ID = config['USER_ID_FIELD']
self.ITEM_ID = config['ITEM_ID_FIELD']
self.ITEM_SEQ = self.ITEM_ID + config['LIST_SUFFIX']
self.ITEM_SEQ_LEN = config['ITEM_LIST_LENGTH_FIELD']
self.POS_ITEM_ID = self.ITEM_ID
self.NEG_ITEM_ID = config['NEG_PREFIX'] + self.ITEM_ID
self.max_seq_length = config['MAX_ITEM_LIST_LENGTH']
self.n_items = dataset.num(self.ITEM_ID)
def gather_indexes(self, output, gather_index):
"""Gathers the vectors at the specific positions over a minibatch"""
gather_index = gather_index.view(-1, 1, 1).expand(-1, -1, output.shape[-1])
output_tensor = output.gather(dim=1, index=gather_index)
return output_tensor.squeeze(1)
# Path: recbole/model/layers.py
class TransformerEncoder(nn.Module):
r""" One TransformerEncoder consists of several TransformerLayers.
- n_layers(num): num of transformer layers in transformer encoder. Default: 2
- n_heads(num): num of attention heads for multi-head attention layer. Default: 2
- hidden_size(num): the input and output hidden size. Default: 64
- inner_size(num): the dimensionality in feed-forward layer. Default: 256
- hidden_dropout_prob(float): probability of an element to be zeroed. Default: 0.5
- attn_dropout_prob(float): probability of an attention score to be zeroed. Default: 0.5
- hidden_act(str): activation function in feed-forward layer. Default: 'gelu'
candidates: 'gelu', 'relu', 'swish', 'tanh', 'sigmoid'
- layer_norm_eps(float): a value added to the denominator for numerical stability. Default: 1e-12
"""
def __init__(
self,
n_layers=2,
n_heads=2,
hidden_size=64,
inner_size=256,
hidden_dropout_prob=0.5,
attn_dropout_prob=0.5,
hidden_act='gelu',
layer_norm_eps=1e-12
):
super(TransformerEncoder, self).__init__()
layer = TransformerLayer(
n_heads, hidden_size, inner_size, hidden_dropout_prob, attn_dropout_prob, hidden_act, layer_norm_eps
)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(n_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
"""
Args:
hidden_states (torch.Tensor): the input of the TransformerEncoder
attention_mask (torch.Tensor): the attention mask for the input hidden_states
output_all_encoded_layers (Bool): whether output all transformer layers' output
Returns:
all_encoder_layers (list): if output_all_encoded_layers is True, return a list consists of all transformer
layers' output, otherwise return a list only consists of the output of last transformer layer.
"""
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
# Path: recbole/model/loss.py
class BPRLoss(nn.Module):
""" BPRLoss, based on Bayesian Personalized Ranking
Args:
- gamma(float): Small value to avoid division by zero
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = BPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self, gamma=1e-10):
super(BPRLoss, self).__init__()
self.gamma = gamma
def forward(self, pos_score, neg_score):
loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score)).mean()
return loss
# Path: recbole/model/sequential_recommender/sasrec.py
import sys
import torch
import torch.nn.functional as F
import math
from torch import nn
from recbole.model.abstract_recommender import SequentialRecommender
from recbole.model.layers import TransformerEncoder
from recbole.model.loss import BPRLoss
# -*- coding: utf-8 -*-
# @Time : 2020/9/18 11:33
# @Author : Hui Wang
# @Email : hui.wang@ruc.edu.cn
"""
SASRec
################################################
Reference:
Wang-Cheng Kang et al. "Self-Attentive Sequential Recommendation." in ICDM 2018.
Reference:
https://github.com/kang205/SASRec
"""
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
| class SASRec(SequentialRecommender): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: timapage/pyqt6-yolov8
# Path: src/models/detection/detector_base.py
class DetectorBase(YoloPredictorBase):
def draw_results(image, model_results):
FONT_SCALE = 1e-3
THICKNESS_SCALE = 6e-4
# Path: src/models/base/yolov8_base.py
class ModelError(Exception):
pass
# Path: src/utils/boxes.py
def xywh2xyxy(x):
# Convert bounding box (x, y, w, h) to bounding box (x1, y1, x2, y2)
y = np.copy(x)
y[..., 0] = x[..., 0] - x[..., 2] / 2
y[..., 1] = x[..., 1] - x[..., 3] / 2
y[..., 2] = x[..., 0] + x[..., 2] / 2
y[..., 3] = x[..., 1] + x[..., 3] / 2
return y
# Path: src/utils/boxes.py
def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
"""Multiclass NMS implemented in Numpy. Class-agnostic version."""
cls_inds = scores.argmax(1)
cls_scores = scores[np.arange(len(cls_inds)), cls_inds]
valid_score_mask = cls_scores > score_thr
if valid_score_mask.sum() == 0:
return None
valid_scores = cls_scores[valid_score_mask]
valid_boxes = boxes[valid_score_mask]
valid_cls_inds = cls_inds[valid_score_mask]
keep = nms(valid_boxes, valid_scores, nms_thr)
#dets = []
for i in keep:
dets = np.concatenate(
[valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
)
return dets
# Path: src/utils/general.py
def get_classes(class_txt_file):
with open(class_txt_file, 'r') as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
# Path: src/models/detection/yolov8_detector_onnx.py
import numpy as np
import cv2 as cv
from onnxruntime import InferenceSession
from src.models.detection.detector_base import DetectorBase, Model
from src.models.base.yolov8_base import ModelError
from src.utils.boxes import xywh2xyxy, multiclass_nms_class_agnostic
from src.utils.general import get_classes
class YoloDetector(DetectorBase):
def __init__(self):
self._model = None
def init(self, model_path, class_txt_path, confidence_threshold=0.3, iou_threshold=0.45):
| _class_names = get_classes(class_txt_path) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OthersideAI/self-operating-computer
# Path: operate/utils/style.py
ANSI_BRIGHT_MAGENTA = "\033[95m" if supports_ansi() else "" # Bright magenta text
# Path: operate/dialog.py
def main(model, terminal_prompt, voice_mode=False):
"""
Main function for the Self-Operating Computer.
Parameters:
- model: The model used for generating responses.
- terminal_prompt: A string representing the prompt provided in the terminal.
- voice_mode: A boolean indicating whether to enable voice mode.
Returns:
None
"""
mic = None
# Initialize `WhisperMic`, if `voice_mode` is True
validation(model, voice_mode)
if voice_mode:
try:
from whisper_mic import WhisperMic
# Initialize WhisperMic if import is successful
mic = WhisperMic()
except ImportError:
print(
"Voice mode requires the 'whisper_mic' module. Please install it using 'pip install -r requirements-audio.txt'"
)
sys.exit(1)
# Skip message dialog if prompt was given directly
if not terminal_prompt:
message_dialog(
title="Self-Operating Computer",
text="Ask a computer to do anything.",
style=style,
).run()
else:
print("Running direct prompt...")
print("SYSTEM", platform.system())
# Clear the console
if platform.system() == "Windows":
os.system("cls")
else:
print("\033c", end="")
if terminal_prompt: # Skip objective prompt if it was given as an argument
objective = terminal_prompt
elif voice_mode:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RESET} Listening for your command... (speak now)"
)
try:
objective = mic.listen()
except Exception as e:
print(f"{ANSI_RED}Error in capturing voice input: {e}{ANSI_RESET}")
return # Exit if voice input fails
else:
print(f"{ANSI_GREEN}[Self-Operating Computer]\n{ANSI_RESET}{USER_QUESTION}")
print(f"{ANSI_YELLOW}[User]{ANSI_RESET}")
objective = prompt(style=style)
assistant_message = {"role": "assistant", "content": USER_QUESTION}
user_message = {
"role": "user",
"content": f"Objective: {objective}",
}
messages = [assistant_message, user_message]
loop_count = 0
while True:
if config.debug:
print("[loop] messages before next action:\n\n\n", messages[1:])
try:
response = asyncio.run(get_next_action(model, messages, objective))
action = parse_response(response)
action_type = action.get("type")
action_detail = action.get("data")
except ModelNotRecognizedException as e:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}"
)
break
except Exception as e:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] -> {e} {ANSI_RESET}"
)
break
if action_type == "DONE":
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BLUE} Objective complete {ANSI_RESET}"
)
summary = summarize(model, messages, objective)
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BLUE} Summary\n{ANSI_RESET}{summary}"
)
break
if action_type != "UNKNOWN":
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BRIGHT_MAGENTA} [Act] {action_type} {ANSI_RESET}{action_detail}"
)
function_response = ""
if action_type == "SEARCH":
function_response = search(action_detail)
elif action_type == "TYPE":
function_response = keyboard_type(action_detail)
elif action_type == "CLICK":
function_response = click(action_detail)
else:
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] something went wrong :({ANSI_RESET}"
)
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] AI response\n{ANSI_RESET}{response}"
)
break
print(
f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_BRIGHT_MAGENTA} [Act] {action_type} COMPLETE {ANSI_RESET}{function_response}"
)
message = {
"role": "assistant",
"content": function_response,
}
messages.append(message)
loop_count += 1
if loop_count > 15:
break
# Path: operate/main.py
import argparse
from operate.utils.style import ANSI_BRIGHT_MAGENTA
from operate.dialog import main
"""
Self-Operating Computer
"""
def main_entry():
parser = argparse.ArgumentParser(
description="Run the self-operating-computer with a specified model."
)
parser.add_argument(
"-m",
"--model",
help="Specify the model to use",
required=False,
default="gpt-4",
)
# Add a voice flag
parser.add_argument(
"--voice",
help="Use voice input mode",
action="store_true",
)
# Allow for direct input of prompt
parser.add_argument(
"--prompt",
help="Directly input the objective prompt",
type=str,
required=False,
)
try:
args = parser.parse_args()
| main( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: netease-youdao/EmotiVoice
# Path: frontend_cn.py
def split_py(py):
def has_chinese_punctuation(text):
def has_english_punctuation(text):
def number_to_chinese(number):
def tn_chinese(text):
def g2p_cn(text):
# Path: frontend_en.py
ROOT_DIR = os.path.dirname(os.path.abspath("__file__"))
def read_lexicon(lex_path):
def get_eng_phoneme(text, g2p, lexicon, pad_sos_eos=True):
# Path: frontend.py
import re
import sys
from frontend_cn import g2p_cn, re_digits, tn_chinese
from frontend_en import ROOT_DIR, read_lexicon, G2p, get_eng_phoneme
from os.path import isfile
# Copyright 2023, YOUDAO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Thanks to GuGCoCo and PatroxGaurab for identifying the issue:
# the results differ between frontend.py and frontend_en.py. Here's a quick fix.
#re_english_word = re.compile('([a-z\-\.\'\s,;\:\!\?]+|\d+[\d\.]*)', re.I)
re_english_word = re.compile('([^\u4e00-\u9fa5]+|[ \u3002\uff0c\uff1f\uff01\uff1b\uff1a\u201c\u201d\u2018\u2019\u300a\u300b\u3008\u3009\u3010\u3011\u300e\u300f\u2014\u2026\u3001\uff08\uff09\u4e00-\u9fa5]+)', re.I)
def g2p_cn_en(text, g2p, lexicon):
# Our policy dictates that if the text contains Chinese, digits are to be converted into Chinese.
text=tn_chinese(text)
parts = re_english_word.split(text)
parts=list(filter(None, parts))
tts_text = ["<sos/eos>"]
chartype = ''
text_contains_chinese = contains_chinese(text)
for part in parts:
if part == ' ' or part == '': continue
if re_digits.match(part) and (text_contains_chinese or chartype == '') or contains_chinese(part):
if chartype == 'en':
tts_text.append('eng_cn_sp')
phoneme = g2p_cn(part).split()[1:-1]
chartype = 'cn'
elif re_english_word.match(part):
if chartype == 'cn':
if "sp" in tts_text[-1]:
""
else:
tts_text.append('cn_eng_sp')
phoneme = get_eng_phoneme(part, g2p, lexicon, False).split()
if not phoneme :
# tts_text.pop()
continue
else:
chartype = 'en'
else:
continue
tts_text.extend( phoneme )
tts_text=" ".join(tts_text).split()
if "sp" in tts_text[-1]:
tts_text.pop()
tts_text.append("<sos/eos>")
return " ".join(tts_text)
def contains_chinese(text):
pattern = re.compile(r'[\u4e00-\u9fa5]')
match = re.search(pattern, text)
return match is not None
if __name__ == "__main__":
lexicon = read_lexicon(f"{ROOT_DIR}/lexicon/librispeech-lexicon.txt")
| g2p = G2p() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: daveshap/OpenAI_Agent_Swarm
# Path: shared/utils.py
def chat(client, thread, assistant, functions):
while True:
user_message = input("You: ")
# add user message to thread
thread_message = client.beta.threads.messages.create(
thread.id,
role="user",
content=user_message,
)
# get assistant response in thread
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
)
# wait for run to complete
wait_time = 0
while True:
if wait_time % 5 == 0:
print(f"waiting for run to complete...", flush=True)
wait_time += 1
time.sleep(1)
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id,
)
if run.status == "completed":
break
elif run.status == "in_progress":
continue
elif run.status == "queued":
continue
elif run.status == "requires_action":
if run.required_action.type == 'submit_tool_outputs':
tool_calls = run.required_action.submit_tool_outputs.tool_calls
tool_outputs = []
for tc in tool_calls:
function_to_call = functions.get(tc.function.name, None)
if not function_to_call:
raise ValueError(f"Function {tc.function.name} not found in execution environment")
function_args = json.loads(tc.function.arguments)
function_response = function_to_call(**function_args)
tool_outputs.append({
"tool_call_id": tc.id,
"output": json.dumps(function_response),
})
print(f"Submitting tool outputs...", flush=True)
run = client.beta.threads.runs.submit_tool_outputs(
thread_id=thread.id,
run_id=run.id,
tool_outputs=tool_outputs
)
else:
input(f'Run status: {run.status}. press enter to continue, or ctrl+c to quit')
# get most recent message from thread
thread_messages = client.beta.threads.messages.list(thread.id, limit=10, order='desc')
# get assistant response from message
assistant_response = thread_messages.data[0].content[0].text.value
print(f"\n\nBot: {assistant_response}\n\n", flush=True)
# continue?
try:
input("Press enter to continue chatting, or ctrl+c to stop chat\n")
except KeyboardInterrupt:
print(f"Stopping chat\n" + 90*"-" + "\n\n", flush=True)
break
# Path: shared/openai_config.py
def get_openai_client():
settings = Settings()
return OpenAI(api_key=settings.OPENAI_API_KEY)
# Path: agents/tool_maker/tool_user.py
import os
import json
from shared.utils import chat as chat_loop
from shared.openai_config import get_openai_client
"""
Create an assistant using the tools from tool_creator using the assistant creation API
"""
client = get_openai_client()
def create_tool_user(assistant_details):
# create the assistant
tool_user = client.beta.assistants.create(**assistant_details["build_params"])
print(f"Created assistant {tool_user.id} to use tools\n\n" + 90*"-" + "\n\n", flush=True)
# save the assistant info to a json file
info_to_export = {
"assistant_id": tool_user.id,
"assistant_details": assistant_details,
}
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json', 'w') as f:
json.dump(info_to_export, f, indent=4)
return tool_user
def talk_to_tool_user(assistant_details):
"""
talk to the assistant to use the tools
"""
# check if json file exists
try:
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json') as f:
create_new = input(f'Assistant details found in tool_user.json. Create a new assistant? [y/N]')
if create_new == 'y':
raise Exception("User wants a new assistant")
assistant_from_json = json.load(f)
tool_user = client.beta.assistants.retrieve(assistant_from_json['assistant_id'])
print(f"Loaded assistant details from tool_user.json\n\n" + 90*"-" + "\n\n", flush=True)
print(f'Assistant {tool_user.id}:\n')
assistant_details = assistant_from_json["assistant_details"]
except:
# create the assistant first
tool_user = create_tool_user(assistant_details)
# exec the functions from the py files
os.makedirs('tools', exist_ok=True)
functions = assistant_details["functions"]
for func in functions:
print(f"Loading function {func} into execution environment", flush=True)
with open('tools/' + func + '.py') as f:
exec(f.read(), globals())
functions.update({func: eval(func)})
# Create thread
thread = client.beta.threads.create()
# chat with the assistant
| chat_loop(client, thread, tool_user, functions) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: S-LoRA/S-LoRA
# Path: slora/utils/infer_utils.py
def mark_cost_time(func_name):
def inner_func(func):
def time_func(*args, **kwargs):
if dist.get_rank() in [0, 1] and is_show_cost_time:
torch.cuda.synchronize()
start_time = time.time()
ans = func(*args, **kwargs)
torch.cuda.synchronize()
print(func_name, "cost time:", (time.time() - start_time) * 1000)
return ans
else:
torch.cuda.synchronize()
ans = func(*args, **kwargs)
torch.cuda.synchronize()
return ans
return time_func
return inner_func
# Path: slora/common/basemodel/infer_struct.py
class InferStateInfo:
"""
推理时用的信息结构体
"""
def __init__(self):
self.batch_size = None
self.total_token_num = None
self.b_loc = None
self.b_start_loc = None
self.b_seq_len = None
self.max_len_in_batch = None
self.is_prefill = None
self.mem_manager = None
self.prefill_mem_index = None
self.prefill_key_buffer = None
self.prefill_value_buffer = None
self.decode_is_contiguous = None
self.decode_mem_start = None
self.decode_mem_end = None
self.decode_mem_index = None
self.decode_key_buffer = None
self.decode_value_buffer = None
def init_some_extra_state(self,
model,
batch_size,
total_token_num,
max_len_in_batch,
input_ids : torch.Tensor,
b_loc : torch.Tensor,
b_start_loc : torch.Tensor,
b_seq_len : torch.Tensor,
is_prefill):
pass
# Path: slora/common/basemodel/layer_weights/base_layer_weight.py
class BaseLayerWeight:
def __init__(self):
pass
def load_hf_weights(self, weights):
"""
load weights
"""
pass
def init_static_params(self):
"""
design for some static init params, many model dont need do this.
"""
pass
def verify_load(self):
"""
verify all load is ok
"""
raise Exception("must verify weights load ok")
pass
def _cuda(self, cpu_tensor):
return cpu_tensor.contiguous().to(self.data_type_).cuda()
# Path: slora/common/basemodel/layer_infer/base_layer_infer.py
from slora.utils.infer_utils import mark_cost_time
from slora.common.basemodel.infer_struct import InferStateInfo
from slora.common.basemodel.layer_weights.base_layer_weight import BaseLayerWeight
class BaseLayerInfer:
def __init__(self) -> None:
pass
| @mark_cost_time("pre context forward") # dont to remove this, will make performence down, did not know why |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: disler/multi-agent-postgres-data-analytics
# Path: postgres_da_ai_agent/agents/instruments.py
class AgentInstruments:
"""
Base class for multli-agent instruments that are tools, state, and functions that an agent can use across the lifecycle of conversations
"""
def __init__(self) -> None:
self.session_id = None
self.messages = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def sync_messages(self, messages: list):
"""
Syncs messages with the orchestrator
"""
raise NotImplementedError
def make_agent_chat_file(self, team_name: str):
return os.path.join(self.root_dir, f"agent_chats_{team_name}.json")
def make_agent_cost_file(self, team_name: str):
return os.path.join(self.root_dir, f"agent_cost_{team_name}.json")
@property
def root_dir(self):
return os.path.join(BASE_DIR, self.session_id)
# Path: postgres_da_ai_agent/modules/llm.py
def safe_get(data, dot_chained_keys):
def response_parser(response: Dict[str, Any]):
def prompt(
prompt: str,
model: str = "gpt-4-1106-preview",
instructions: str = "You are a helpful assistant.",
) -> str:
def prompt_func(
prompt: str,
turbo_tools: List[TurboTool],
model: str = "gpt-4-1106-preview",
instructions: str = "You are a helpful assistant.",
) -> str:
def prompt_json_response(
prompt: str,
model: str = "gpt-4-1106-preview",
instructions: str = "You are a helpful assistant.",
) -> str:
def add_cap_ref(
prompt: str, prompt_suffix: str, cap_ref: str, cap_ref_content: str
) -> str:
def count_tokens(text: str):
def estimate_price_and_tokens(text, model="gpt-4"):
# Path: postgres_da_ai_agent/types.py
class Chat:
from_name: str
to_name: str
message: str
created: int = field(default_factory=time.time)
# Path: postgres_da_ai_agent/types.py
class ConversationResult:
success: bool
messages: List[Chat]
cost: float
tokens: int
last_message_str: str
error_message: str
# Path: postgres_da_ai_agent/modules/orchestrator.py
import dataclasses
import json
import autogen
from typing import List, Optional, Tuple
from postgres_da_ai_agent.agents.instruments import AgentInstruments
from postgres_da_ai_agent.modules import llm
from postgres_da_ai_agent.types import Chat, ConversationResult
class Orchestrator:
"""
Orchestrators manage conversations between multi-agent teams.
"""
def __init__(
self,
name: str,
agents: List[autogen.ConversableAgent],
| instruments: AgentInstruments, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: fleet-ai/context
# Path: constants/cli.py
OPENAI_MODELS = [
"gpt-4-1106-preview",
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
]
# Path: constants/ai.py
SYSTEM_PROMPT = """
You are an expert in Python libraries. You carefully provide accurate, factual, thoughtful, nuanced answers, and are brilliant at reasoning. If you think there might not be a correct answer, you say so.
Each token you produce is another opportunity to use computation, therefore you always spend a few sentences explaining background context, assumptions, and step-by-step thinking BEFORE you try to answer a question.
Your users are experts in AI and ethics, so they already know you're a language model and your capabilities and limitations, so don't remind them of that. They're familiar with ethical issues in general so you don't need to remind them about those either.
Your users are also in a CLI environment. You are capable of writing and running code. DO NOT write hypothetical code. ALWAYS write real code that will execute and run end-to-end.
"""
# Path: constants/ai.py
PROMPT = """
Instructions:
- Be objective, direct. Include literal information from the context, don't add any conclusion or subjective information.
- When writing code, ALWAYS have some sort of output (like a print statement). If you're writing a function, call it at the end. Do not generate the output, because the user can run it themselves.
- ALWAYS cite your sources. Context will be given to you after the text ### Context source_url ### with source_url being the url to the file. For example, ### Context https://example.com/docs/api.html#files ### will have a source_url of https://example.com/docs/api.html#files.
- When you cite your source, please cite it as [num] with `num` starting at 1 and incrementing with each source cited (1, 2, 3, ...). At the bottom, have a newline-separated `num: source_url` at the end of the response. ALWAYS add a new line between sources or else the user won't be able to read it. DO NOT convert links into markdown, EVER! If you do that, the user will not be able to click on the links.
For example:
### Context https://example.com/docs/api.html#pdfs ###
I'm a big fan of PDFs.
### Context https://example.com/docs/api.html#csvs ###
I'm a big fan of CSVs.
### Prompt ###
What is this person a big fan of?
### Response ###
This person is a big fan of PDFs[1] and CSVs[2].
1: https://example.com/docs/api.html#pdfs
2: https://example.com/docs/api.html#csvs
"""
# Path: constants/ai.py
API_URL = "https://foundation.fleet.so"
# Path: utils/ai.py
import os
import json
import tiktoken
import openai
import requests
from openai import OpenAI
from constants.cli import OPENAI_MODELS
from constants.ai import SYSTEM_PROMPT, PROMPT, API_URL
# pylint: disable=W0707
# pylint: disable=W0719
def retrieve(query, k=10, filters=None):
"""Retrieves and returns dict.
Args:
query (str): User query to pass in
k (int, optional): number of results passed back. Defaults to 10.
filters (dict, optional): Filters to apply to the query. You can filter based off
any piece of metadata by passing in a dict of the format {metadata_name: filter_value}
ie {"library_id": "1234"}.
See the README for more details:
https://github.com/fleet-ai/context/tree/main#using-fleet-contexts-rich-metadata
Returns:
list: List of queried results
"""
| url = f"{API_URL}/query" |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OpenBMB/ProAgent
# Path: ProAgent/loggers/logs.py
class JsonFileHandler(logging.FileHandler):
class JsonFormatter(logging.Formatter):
class Logger(metaclass=Singleton):
class TypingConsoleHandler(logging.StreamHandler):
class ConsoleHandler(logging.StreamHandler):
class AutoGptFormatter(logging.Formatter):
def __init__(self, filename, mode="a", encoding=None, delay=False):
def emit(self, record):
def format(self, record):
def __init__(self):
def typewriter_log(
self, title="", title_color="", content="", speak_text=False, level=logging.INFO
):
def debug(
self,
message,
title="",
title_color="",
):
def info(
self,
message,
title="",
title_color="",
):
def warn(
self,
message,
title="",
title_color="",
):
def error(self, title, message=""):
def _log(
self,
title: str = "",
title_color: str = "",
message: str = "",
level=logging.INFO,
):
def set_level(self, level):
def double_check(self, additionalText=None):
def log_json(self, data: Any, file_name: str) -> None:
def get_log_directory(self):
def emit(self, record):
def emit(self, record) -> None:
def format(self, record: LogRecord) -> str:
def remove_color_codes(s: str) -> str:
def print_action_base(action: Action):
def print_action_tool(action: Action):
# Path: ProAgent/agent/utils.py
def _chat_completion_request(**args):
"""
Generates a chat completion request with the given arguments and attempts to retrieve the completed output.
Args:
**args: Additional keyword arguments for the chat completion request.
Returns:
The completed output if the request is successful, otherwise None.
"""
for i in range(3):
if i > 0:
logger.info(f"LLM retry for the {i+1}'th time")
try:
output, output_code = _chat_completion_request_without_retry(**args)
if output_code == LLMStatusCode.SUCCESS:
return output
except func_timeout.exceptions.FunctionTimedOut: #TLE
logger.info(f"LLM response time out")
continue
# Path: ProAgent/agent/gpt4_function.py
import logging
import json
from typing import List, Dict
from colorama import Fore, Style
from ProAgent.loggers.logs import logger
from ProAgent.agent.utils import _chat_completion_request
class OpenAIFunction():
def __init__(self):
pass
def parse(self, **args):
"""
Parses the given arguments by making a chat completion request.
Args:
**args: The keyword arguments to be passed to the chat completion request.
Returns:
Tuple: A tuple containing the parsed content, function name, function arguments, and the original message.
Raises:
None.
"""
retry_time = 1
max_time = 3
for i in range(max_time):
output = _chat_completion_request(**args)
if isinstance(output, Dict):
usage = output["usage"]
message = output["choices"][0]["message"]
print(usage)
if "function_call" in message.keys():
break
else:
args['messages'].append({"role": "assistant", "content": message['content']})
args['messages'].append({"role": 'user', "content": "No Function call here! You should always use a function call as your response."})
retry_time += 1
| logger._log(f"{Fore.RED} Retry for the {retry_time}'th time{Style.RESET_ALL}") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: LLaVA-VL/LLaVA-Plus-Codebase
# Path: serve/constants.py
WORKER_HEART_BEAT_INTERVAL = int(os.getenv("FASTCHAT_WORKER_HEART_BEAT_INTERVAL", 45))
# Path: serve/constants.py
class ErrorCode(IntEnum):
"""
https://platform.openai.com/docs/guides/error-codes/api-errors
"""
VALIDATION_TYPE_ERROR = 40001
INVALID_AUTH_KEY = 40101
INCORRECT_AUTH_KEY = 40102
NO_PERMISSION = 40103
INVALID_MODEL = 40301
PARAM_OUT_OF_RANGE = 40302
CONTEXT_OVERFLOW = 40303
RATE_LIMIT = 42901
QUOTA_EXCEEDED = 42902
ENGINE_OVERLOADED = 42903
INTERNAL_ERROR = 50001
CUDA_OUT_OF_MEMORY = 50002
GRADIO_REQUEST_ERROR = 50003
GRADIO_STREAM_UNKNOWN_ERROR = 50004
CONTROLLER_NO_WORKER = 50005
CONTROLLER_WORKER_TIMEOUT = 50006
# Path: serve/constants.py
SERVER_ERROR_MSG = (
"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
)
# Path: serve/utils.py
def build_logger(logger_name, logger_filename):
global handler
formatter = logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
# Set the format of root handlers
if not logging.getLogger().handlers:
if sys.version_info[1] >= 9:
# This is for windows
logging.basicConfig(level=logging.INFO, encoding="utf-8")
else:
if platform.system() == "Windows":
warnings.warn(
"If you are running on Windows, "
"we recommend you use Python >= 3.9 for UTF-8 encoding."
)
logging.basicConfig(level=logging.INFO)
logging.getLogger().handlers[0].setFormatter(formatter)
# Redirect stdout and stderr to loggers
stdout_logger = logging.getLogger("stdout")
stdout_logger.setLevel(logging.INFO)
sl = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = sl
stderr_logger = logging.getLogger("stderr")
stderr_logger.setLevel(logging.ERROR)
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl
# Get logger
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
os.makedirs(LOGDIR, exist_ok=True)
filename = os.path.join(LOGDIR, logger_filename)
handler = logging.handlers.TimedRotatingFileHandler(
filename, when="D", utc=True, encoding="utf-8"
)
handler.setFormatter(formatter)
for logger in [stdout_logger, stderr_logger, logger]:
if logger in visited_loggers:
continue
visited_loggers.add(logger)
logger.addHandler(handler)
return logger
# Path: serve/utils.py
def pretty_print_semaphore(semaphore):
"""Print a semaphore in better format."""
if semaphore is None:
return "None"
return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"
# Path: serve/blip2grounding_worker.py
import sys, os
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import sys
import time
import threading
import uuid
import base64
import numpy as np
import requests
import groundingdino.datasets.transforms as T
import pycocotools.mask as mask_util
import torch
import torch.nn.functional as F
import uvicorn
from groundingdino.util import box_ops
from segment_anything import build_sam
from segment_anything.predictor import SamPredictor
from typing import List, Tuple, Union
from io import BytesIO
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
from PIL import Image
from demo.inference_on_a_image import get_grounding_output
from groundingdino.util.inference import load_model, predict
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LlamaTokenizer,
AutoModel,
)
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LLaMATokenizer,
AutoModel,
)
from serve.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from serve.utils import build_logger, pretty_print_semaphore
"""
A model worker executes the model.
"""
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
try:
except ImportError:
GB = 1 << 30
now_file_name = os.__file__
logdir = "logs/workers/"
os.makedirs(logdir, exist_ok=True)
logfile = os.path.join(logdir, f"{now_file_name}.log")
worker_id = str(uuid.uuid4())[:6]
| logger = build_logger(now_file_name, logfile) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: opendilab/LLMRiddles
# Path: llmriddles/questions/question.py
def register_question(text: Union[Mapping[str, str], str],
checkers: Union[Mapping[str, SingleLangCheckerTyping], MultiLangCheckerTyping],
name=Union[Mapping[str, str], str],
level: int = 1, default_lang='cn'):
checker = checkers if isinstance(checkers, Checker) else Checker(checkers)
if isinstance(text, str):
texts = {default_lang: text}
else:
texts = text
if isinstance(name, str):
names = {default_lang: name}
else:
names = name
_KNOWN_PROBLEMS.append(Question(texts, checker, names, level))
# Path: llmriddles/questions/math_tools.py
def get_all_numbers(text: str):
return get_all_numbers_in_a_sentence(text) + get_all_numbers_in_a_sentence_with_comma(text)
# Path: llmriddles/questions/level2.py
import re
import sympy
from typing import Optional, Tuple
from .question import register_question
from .math_tools import get_all_numbers
CN_TEXT_1 = """
第二章第一题(质数长度),你需要提出一个字数是质数的问题,使回答的长度刚好是它的下一个质数。
"""
EN_TEXT_1 = """
For the first question in chapter 2, You need to come up with a question that has a prime number of words, so the answer's length is exactly the next prime number.
"""
def _is_prime(v):
return sympy.isprime(v)
def _next_prime(v):
while v:
v += 1
if _is_prime(v):
return v
def _cn_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]:
qs_length = len(user_text.strip())
if not _is_prime(qs_length):
return False, f'问题长度为{qs_length},非质数'
answer_value = len(answer_text)
next_prime = _next_prime(qs_length)
if answer_value != next_prime:
return False, f'下一个质数为{next_prime},但回答长度为{answer_value}'
return True, None
def _en_words(text: str):
return len(re.findall(r'\w+', text))
def _en_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]:
qs_length = _en_words(user_text.strip())
if not _is_prime(qs_length):
return False, f'The question has a length of {qs_length}, which is not a prime number'
answer_value = _en_words(answer_text)
next_prime = _next_prime(qs_length)
if answer_value != next_prime:
return False, f'The next prime number is {next_prime}, but the answer\'s length is {answer_value}'
return True, None
| register_question( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: codefuse-ai/CodeFuse-ModelCache
# Path: modelcache/utils/error.py
class NotFoundError(CacheError):
"""Raise when getting an unsupported store."""
def __init__(self, store_type, current_type_name):
super().__init__(f"Unsupported ${store_type}: {current_type_name}")
# Path: modelcache/utils/error.py
class ParamError(CacheError):
"""Raise when receiving an invalid param."""
# Path: modelcache/manager/vector_data/manager.py
from modelcache.utils.error import NotFoundError, ParamError
from modelcache.manager.vector_data.milvus import Milvus
from modelcache.manager.vector_data.faiss import Faiss
from modelcache.manager.vector_data.chroma import Chromadb
from modelcache.manager.vector_data.hnswlib_store import Hnswlib
# -*- coding: utf-8 -*-
TOP_K = 1
FAISS_INDEX_PATH = "faiss.index"
DIMENSION = 0
MILVUS_HOST = "localhost"
MILVUS_PORT = 19530
MILVUS_USER = ""
MILVUS_PSW = ""
MILVUS_SECURE = False
MILVUS_INDEX_PARAMS = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
COLLECTION_NAME = "modelcache"
class VectorBase:
"""
VectorBase to manager the vector base.
"""
def __init__(self):
raise EnvironmentError(
"VectorBase is designed to be instantiated, please using the `VectorBase.get(name)`."
)
@staticmethod
def check_dimension(dimension):
if dimension <= 0:
raise ParamError(
f"the dimension should be greater than zero, current value: {dimension}."
)
@staticmethod
def get(name, **kwargs):
top_k = kwargs.get("top_k", TOP_K)
if name == "milvus":
dimension = kwargs.get("dimension", DIMENSION)
milvus_config = kwargs.get("milvus_config")
VectorBase.check_dimension(dimension)
host = milvus_config.get('milvus', 'host')
port = milvus_config.get('milvus', 'port')
user = milvus_config.get('milvus', 'user')
password = milvus_config.get('milvus', 'password')
secure = kwargs.get("secure", MILVUS_SECURE)
collection_name = kwargs.get("collection_name", COLLECTION_NAME)
index_params = kwargs.get("index_params", MILVUS_INDEX_PARAMS)
search_params = kwargs.get("search_params", None)
local_mode = kwargs.get("local_mode", False)
local_data = kwargs.get("local_data", "./milvus_data")
vector_base = Milvus(
host=host,
port=port,
user=user,
password=password,
secure=secure,
collection_name=collection_name,
dimension=dimension,
top_k=top_k,
index_params=index_params,
search_params=search_params,
local_mode=local_mode,
local_data=local_data
)
elif name == "faiss":
dimension = kwargs.get("dimension", DIMENSION)
index_path = kwargs.pop("index_path", FAISS_INDEX_PATH)
VectorBase.check_dimension(dimension)
vector_base = Faiss(
index_file_path=index_path, dimension=dimension, top_k=top_k
)
elif name == "chromadb":
client_settings = kwargs.get("client_settings", None)
persist_directory = kwargs.get("persist_directory", None)
collection_name = kwargs.get("collection_name", COLLECTION_NAME)
vector_base = Chromadb(
client_settings=client_settings,
persist_directory=persist_directory,
collection_name=collection_name,
top_k=top_k,
)
elif name == "hnswlib":
dimension = kwargs.get("dimension", DIMENSION)
index_path = kwargs.pop("index_path", "./hnswlib_index.bin")
max_elements = kwargs.pop("max_elements", 100000)
VectorBase.check_dimension(dimension)
vector_base = Hnswlib(
index_file_path=index_path, dimension=dimension,
top_k=top_k, max_elements=max_elements
)
else:
| raise NotFoundError("vector store", name) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ForceFledgling/proxyhub
# Path: proxyhub/errors.py
class BadStatusLine(Exception):
errmsg = 'bad_status_line'
# Path: proxyhub/utils.py
def get_all_ip(page):
# TODO: add IPv6 support
return set(IPPattern.findall(page))
# Path: proxyhub/utils.py
def get_status_code(resp, start=9, stop=12):
try:
if not isinstance(resp, (bytes, str)):
raise TypeError(f'{type(resp).__name__} is not supported')
code = int(resp[start:stop])
except ValueError:
return 400 # Bad Request
else:
return code
# Path: proxyhub/utils.py
def parse_headers(headers):
headers = headers.decode('utf-8', 'ignore').split('\r\n')
_headers = {}
_headers.update(parse_status_line(headers.pop(0)))
for h in headers:
if not h:
break
name, val = h.split(':', 1)
_headers[name.strip().title()] = val.strip()
if ':' in _headers.get('Host', ''):
host, port = _headers['Host'].split(':')
_headers['Host'], _headers['Port'] = host, int(port)
return _headers
# Path: proxyhub/utils.py
def parse_status_line(line):
_headers = {}
is_response = line.startswith('HTTP/')
try:
if is_response: # HTTP/1.1 200 OK
version, status, *reason = line.split()
else: # GET / HTTP/1.1
method, path, version = line.split()
except ValueError:
raise BadStatusLine(line)
_headers['Version'] = version.upper()
if is_response:
_headers['Status'] = int(status)
reason = ' '.join(reason)
reason = reason.upper() if reason.lower() == 'ok' else reason.title()
_headers['Reason'] = reason
else:
_headers['Method'] = method.upper()
_headers['Path'] = path
if _headers['Method'] == 'CONNECT':
host, port = path.split(':')
_headers['Host'], _headers['Port'] = host, int(port)
return _headers
# Path: tests/test_utils.py
import pytest
from proxyhub.errors import BadStatusLine
from proxyhub.utils import (
get_all_ip,
get_status_code,
parse_headers,
parse_status_line,
)
def test_get_all_ip():
page = "abc127.0.0.1:80abc127.0.0.1xx127.0.0.2:8080h"
assert get_all_ip(page) == {'127.0.0.1', '127.0.0.2'}
def test_get_status_code():
assert get_status_code('HTTP/1.1 200 OK\r\n') == 200
assert get_status_code('<html>123</html>\r\n') == 400
assert get_status_code(b'HTTP/1.1 403 Forbidden\r\n') == 403
assert get_status_code(b'HTTP/1.1 400 Bad Request\r\n') == 400
def test_parse_status_line():
| assert parse_status_line('HTTP/1.1 200 OK') == { |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: WithSecureLabs/IceKube
# Path: icekube/config.py
class Neo4j(TypedDict):
class Config(TypedDict):
# Path: icekube/icekube.py
def create_indices():
for resource in api_resources():
if "list" not in resource.verbs:
continue
kind = resource.kind
namespace = resource.namespaced
cmd = f"CREATE INDEX {kind.lower()} IF NOT EXISTS "
cmd += f"FOR (n:{kind}) ON (n.name"
if namespace:
cmd += ", n.namespace"
cmd += ")"
with get_driver().session() as session:
session.run(cmd)
# Path: icekube/icekube.py
def enumerate_resource_kind(
ignore: Optional[List[str]] = None,
):
if ignore is None:
ignore = []
with get_driver().session() as session:
cluster = Cluster(apiVersion="N/A", name=context_name(), version=kube_version())
cmd, kwargs = create(cluster)
session.run(cmd, **kwargs)
signers = [
"kubernetes.io/kube-apiserver-client",
"kubernetes.io/kube-apiserver-client-kubelet",
"kubernetes.io/kubelet-serving",
"kubernetes.io/legacy-unknown",
]
for signer in signers:
s = Signer(name=signer)
cmd, kwargs = create(s)
session.run(cmd, **kwargs)
for resource in all_resources(ignore=ignore):
cmd, kwargs = create(resource)
session.run(cmd, **kwargs)
# Path: icekube/icekube.py
def generate_relationships(threaded: bool = False) -> None:
logger.info("Generating relationships")
logger.info("Fetching resources from neo4j")
driver = get_driver()
resources = find()
logger.info("Fetched resources from neo4j")
generator = partial(relationship_generator, driver, True)
if threaded:
with ThreadPoolExecutor() as exc:
exc.map(generator, resources)
else:
print("First pass for relationships")
for resource in tqdm(resources):
generator(resource)
print("")
# Do a second loop across relationships to handle objects created as part
# of other relationships
resources = find()
generator = partial(relationship_generator, driver, False)
if threaded:
with ThreadPoolExecutor() as exc:
exc.map(generator, resources)
else:
print("Second pass for relationships")
for resource in tqdm(resources):
generator(resource)
print("")
# Path: icekube/icekube.py
def purge_neo4j() -> None:
with get_driver().session() as session:
session.run("MATCH (x)-[r]-(y) DELETE x, r, y")
session.run("MATCH (x) DELETE x")
# Path: icekube/icekube.py
def remove_attack_paths() -> None:
with get_driver().session() as session:
session.run("MATCH ()-[r]-() WHERE EXISTS (r.attack_path) DELETE r")
# Path: icekube/icekube.py
def setup_attack_paths() -> None:
print("Generating attack paths")
for relationship, query in tqdm(attack_paths.items()):
with get_driver().session() as session:
if isinstance(query, str):
query = [query]
for q in query:
cmd = q + f" MERGE (src)-[:{relationship} {{ attack_path: 1 }}]->(dest)"
session.run(cmd)
print("")
# Path: icekube/kube.py
def load_kube_config():
def kube_version() -> str:
def context_name() -> str:
def api_versions() -> List[str]:
def api_resources() -> List[APIResource]:
def all_resources(
preferred_versions_only: bool = True,
ignore: Optional[List[str]] = None,
) -> Iterator[Resource]:
def metadata_download() -> Dict[str, Any]:
# Path: icekube/log_config.py
def build_logger(debug_level=logging.DEBUG):
# create logger
logger = logging.getLogger("icekube")
logger.setLevel(debug_level)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(debug_level)
# create formatter and add it to the handlers
formatter = logging.Formatter("%(asctime)s|%(name)s|%(levelname)s|%(message)s")
ch.setFormatter(formatter)
# tell tqdm about the handler
tqdm_handler = _TqdmLoggingHandler(std_tqdm)
tqdm_handler.setFormatter(formatter)
tqdm_handler.stream = ch.stream
# add the handlers to the logger
logger.addHandler(tqdm_handler)
# Path: icekube/cli.py
import json
import logging
import typer
from pathlib import Path
from typing import Iterator, List, Optional, cast
from icekube.config import config
from icekube.icekube import (
create_indices,
enumerate_resource_kind,
generate_relationships,
purge_neo4j,
remove_attack_paths,
setup_attack_paths,
)
from icekube.kube import (
APIResource,
Resource,
all_resources,
metadata_download,
)
from icekube.log_config import build_logger
from tqdm import tqdm
from icekube import kube
from icekube import icekube
app = typer.Typer()
IGNORE_DEFAULT = "events,componentstatuses"
@app.command()
def run(
ignore: str = typer.Option(
IGNORE_DEFAULT,
help="Names of resource types to ignore",
),
):
enumerate(ignore)
attack_path()
@app.command()
def enumerate(
ignore: str = typer.Option(
IGNORE_DEFAULT,
help="Names of resource types to ignore",
),
):
create_indices()
enumerate_resource_kind(ignore.split(","))
generate_relationships()
@app.command()
def relationships():
generate_relationships()
@app.command()
def attack_path():
| remove_attack_paths() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: IAAR-Shanghai/UHGEval
# Path: uhgeval/llm/api.py
class Baichuan2_53B_Chat(BaseLLM):
def request(self, query) -> str:
import time
url = conf.Baichuan2_53B_url
api_key = conf.Baichuan2_53B_api_key
secret_key = conf.Baichuan2_53B_secret_key
time_stamp = int(time.time())
json_data = json.dumps({
"model": "Baichuan2-53B",
"messages": [
{
"role": "user",
"content": query
}
],
"parameters": {
"temperature": self.params['temperature'],
"top_p": self.params['top_p'],
"top_k": self.params['top_k'],
}
})
def _calculate_md5(input_string):
import hashlib
md5 = hashlib.md5()
md5.update(input_string.encode('utf-8'))
encrypted = md5.hexdigest()
return encrypted
signature = _calculate_md5(secret_key + json_data + str(time_stamp))
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + api_key,
"X-BC-Timestamp": str(time_stamp),
"X-BC-Signature": signature,
"X-BC-Sign-Algo": "MD5",
}
res = requests.post(url, data=json_data, headers=headers)
res = res.json()['data']['messages'][0]['content']
return res
# Path: uhgeval/llm/api.py
class GPT(BaseLLM):
def __init__(self, model_name='gpt-3.5-turbo', temperature=1.0, max_new_tokens=1024, report=False):
super().__init__(model_name, temperature, max_new_tokens)
self.report = report
def request(self, query: str) -> str:
openai.api_key = conf.GPT_api_key
res = openai.ChatCompletion.create(
model = self.params['model_name'],
messages = [{"role": "user","content": query}],
temperature = self.params['temperature'],
max_tokens = self.params['max_new_tokens'],
top_p = self.params['top_p'],
)
real_res = res["choices"][0]["message"]["content"]
token_consumed = res['usage']['total_tokens']
logger.info(f'GPT token consumed: {token_consumed}') if self.report else ()
return real_res
# Path: tests/llm/test_api.py
import unittest
from uhgeval.llm.api import (
Baichuan2_53B_Chat,
GPT,
)
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
class TestBaichuan253BChat(unittest.TestCase):
def setUp(self):
self.model = Baichuan2_53B_Chat(temperature=0.1)
def test_request(self):
query = "How are you?"
response = self.model.request(query)
self.assertIsInstance(response, str)
self.assertGreater(len(response), 0)
def test_continue_writing(self):
obj = {"headLine": "Story", "broadcastDate": "2023-11-15", "newsBeginning": "Once upon a time, there is a"}
result = self.model.continue_writing(obj)
self.assertIsInstance(result, str)
self.assertGreater(len(result), 0)
class TestGPT(unittest.TestCase):
def setUp(self):
| self.gpt35 = GPT(model_name='gpt-3.5-turbo', temperature=0.1) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mobiusml/hqq
# Path: hqq/engine/hf.py
_HQQ_REGISTRY = {}
_HQQ_REGISTRY = _HQQ_REGISTRY
class HQQModelForCausalLM(_Parent, HQQWrapper):
def __init__(self, *args, **kwargs):
def _make_quantizable(cls, model, quantized):
def _validate_params(cls, params:Dict):
def from_pretrained(cls, *args, **kwargs):
def _get_arch_key_from_save_dir(cls, save_dir:str):
# Path: hqq/core/peft.py
class PeftUtils:
@classmethod
def get_base_class(cls, model, base_class):
#Get base class
if((base_class is None) and hasattr(model, 'base_class')):
base_class = model.base_class
assert (base_class is not None), "You need to provide the base HQQ class (LlamaHQQ, MixtralHQQ, etc.) as model.base_class or as an argument base_class=LlamaHQQ"
return base_class
@classmethod
def add_lora(cls, model, lora_params, base_class=None, verbose=True):
#Base classs
base_class = cls.get_base_class(model, base_class)
#Freeze
for param in model.parameters():
param.requires_grad = False
#Patch
base_class.patch_linearlayers(model, patch_linear_add_peft, lora_params, verbose=verbose)
#Rename modules
autoname_modules(model)
#Default backprop backend
HQQLinear.set_backend(HQQBackend.PYTORCH_BACKPROP)
@classmethod
def merge_lora(cls, model, merge_lora_params, base_class=None, verbose=True):
#Base classs
base_class = cls.get_base_class(model, base_class)
#Patch
base_class.patch_linearlayers(model, patch_linear_merge_peft, merge_lora_params, verbose=verbose)
@classmethod
def cast_lora_weights(cls, model, dtype, base_class=None, verbose=True):
#Base classs
base_class = cls.get_base_class(model, base_class)
#Linear tags
linear_tags = base_class.get_linear_tags()
#Patch
base_class.patch_linearlayers(model,
patch_linear_cast_peft,
dict([(linear_tag, dtype) for linear_tag in linear_tags]),
verbose=verbose)
@classmethod
def save_lora_weights(cls, model, filename, base_class=None, verbose=True):
#Base classs
base_class = cls.get_base_class(model, base_class)
lora_global_params = {}
def _patch_linear_save_weights(layer, patch_params, return_layer=True):
if(is_hqq_lora_layer(layer)):
lora_global_params[layer.name] = layer.state_dict()
if(return_layer): return layer
#Linear tags
linear_tags = base_class.get_linear_tags()
#Patch
base_class.patch_linearlayers(model,
_patch_linear_save_weights,
dict([(linear_tag, None) for linear_tag in linear_tags]),
verbose=verbose)
#save
torch.save(lora_global_params, filename)
@classmethod
def load_lora_weights(cls, model, filename, base_class=None, verbose=True):
#Base classs
base_class = cls.get_base_class(model, base_class)
lora_global_params = torch.load(file, map_location='cpu')
def _patch_linear_load_weights(layer, patch_params, return_layer=True):
if(is_hqq_lora_layer(layer)):
layer.load_state_dict(lora_global_params[layer.name])
if(return_layer): return layer
#Linear tags
linear_tags = base_class.get_linear_tags()
#Patch
base_class.patch_linearlayers(model,
_patch_linear_load_weights,
dict([(linear_tag, None) for linear_tag in linear_tags]),
verbose=verbose)
# Path: examples/lora/train_hqq_lora_example.py
from hqq.engine.hf import HQQModelForCausalLM, AutoTokenizer
from hqq.core.quantize import *
from hqq.core.peft import PeftUtils
from hqq.core.quantize import *
from datasets import load_dataset, Dataset
from tqdm import tqdm
from trl import SFTTrainer
import transformers
import numpy as np
import random
#Settings
######################################################################################
hf_auth = None #HuggingFace token
cache_path = '' #cache directory to store data
#Chose a model
model_id = "meta-llama/Llama-2-7b-hf"
#model_id = "meta-llama/Llama-2-13b-hf"
#model_id = "meta-llama/Llama-2-70b-hf"
#HQQ Quantize
######################################################################################
model = HQQModelForCausalLM.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)
#Quantize the model
quant_config = BaseQuantizeConfig(nbits=4, group_size=64, quant_scale=False, quant_zero=False)
model.quantize_model(quant_config=quant_config)
#Add Peft
######################################################################################
train_dtype = torch.bfloat16 #torch.float32 / torch.bfloat16
base_lora_params = {'lora_type':'default', 'r':32, 'lora_alpha':64, 'dropout':0.05, 'train_dtype':train_dtype}
lora_params = {'self_attn.q_proj': base_lora_params,
'self_attn.k_proj': base_lora_params,
'self_attn.v_proj': base_lora_params,
'self_attn.o_proj': base_lora_params,
'mlp.gate_proj' : None,
'mlp.up_proj' : None,
'mlp.down_proj' : None}
#Apply LoRA
| PeftUtils.add_lora(model, lora_params) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TheFunny/ArisuAutoSweeper
# Path: module/logger/logger.py
def empty_function(*args, **kwargs):
def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):
def emit(self, record: logging.LogRecord) -> None:
def handle(self, record: logging.LogRecord) -> bool:
def options(self) -> ConsoleOptions:
def _set_file_logger(name=pyw_name):
def set_file_logger(name=pyw_name):
def set_func_logger(func):
def _get_renderables(
self: Console, *objects, sep=" ", end="\n", justify=None, emoji=None, markup=None, highlight=None,
) -> List[ConsoleRenderable]:
def print(*objects: ConsoleRenderable, **kwargs):
def rule(title="", *, characters="─", style="rule.line", end="\n", align="center"):
def hr(title, level=3):
def attr(name, text):
def attr_align(name, text, front='', align=22):
def show():
def error_convert(func):
def error_wrapper(msg, *args, **kwargs):
class RichFileHandler(RichHandler):
class RichRenderableHandler(RichHandler):
class HTMLConsole(Console):
class Highlighter(RegexHighlighter):
WEB_THEME = Theme({
"web.brace": Style(bold=True),
"web.bool_true": Style(color="bright_green", italic=True),
"web.bool_false": Style(color="bright_red", italic=True),
"web.none": Style(color="magenta", italic=True),
"web.path": Style(color="magenta"),
"web.filename": Style(color="bright_magenta"),
"web.str": Style(color="green", italic=False, bold=False),
"web.time": Style(color="cyan"),
"rule.text": Style(bold=True),
})
# Path: module/webui/setting.py
class State:
"""
Shared settings
"""
_init = False
_clearup = False
restart_event: threading.Event = None
manager: SyncManager = None
electron: bool = False
theme: str = "default"
@classmethod
def init(cls):
cls.manager = multiprocessing.Manager()
cls._init = True
@classmethod
def clearup(cls):
cls.manager.shutdown()
cls._clearup = True
@cached_class_property
def deploy_config(self) -> "DeployConfig":
"""
Returns:
DeployConfig:
"""
from module.webui.config import DeployConfig
return DeployConfig()
@cached_class_property
def config_updater(self) -> "ConfigUpdater":
"""
Returns:
ConfigUpdater:
"""
from module.config.config_updater import ConfigUpdater
return ConfigUpdater()
# Path: gui.py
import threading
import argparse
import asyncio
import sys
import uvicorn
from multiprocessing import Event, Process
from module.logger import logger
from module.webui.setting import State
from module.logger.logger import console_hdlr
def func(ev: threading.Event):
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
| State.restart_event = ev |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: liuzhao1225/YouDub
# Path: youdub/utils.py
def save_wav(wav: np.ndarray, path: str, sample_rate: int = 24000) -> None:
"""Save float waveform to a file using Scipy.
Args:
wav (np.ndarray): Waveform with float values in range [-1, 1] to save.
path (str): Path to a output file.
sample_rate (int, optional): Sampling rate used for saving to the file. Defaults to 24000.
"""
# wav_norm = wav * (32767 / max(0.01, np.max(np.abs(wav))))
wav_norm = wav * 32767
wavfile.write(path, sample_rate, wav_norm.astype(np.int16))
# Path: youdub/utils.py
def adjust_audio_length(wav, src_path, dst_path, desired_length: float, sample_rate: int = 24000) -> np.ndarray:
"""Adjust the length of the audio.
Args:
wav (np.ndarray): Original waveform.
sample_rate (int): Sampling rate of the audio.
desired_length (float): Desired length of the audio in seconds.
Returns:
np.ndarray: Waveform with adjusted length.
"""
current_length = wav.shape[0] / sample_rate
speed_factor = max(min(desired_length / current_length, 1.1), 2/3)
desired_length = current_length * speed_factor
stretch_audio(src_path, dst_path, ratio=speed_factor,
sample_rate=sample_rate)
y, sr = librosa.load(dst_path, sr=sample_rate)
return y[:int(desired_length * sr)], desired_length
# Path: youdub/tts_paddle.py
import os, sys
import numpy as np
import json
import logging
from paddlespeech.cli.tts import TTSExecutor
from youdub.utils import save_wav, adjust_audio_length
sys.path.append(os.getcwd())
class TTS_Clone:
def __init__(self, model_path="fastspeech2_male", voc='pwgan_male',device='gpu:0', language='mix'):
logging.info(f'Loading TTS model {model_path}...')
self.am = model_path
self.voc = voc
self.tts = TTSExecutor()
self.language = language
logging.info('Model TTS loaded.')
def inference(self, text, output) -> np.ndarray:
self.tts(
text=text,
am=self.am,
voc=self.voc,
lang=self.language,
output=output,
use_onnx=True)
print(f'{output}: {text}')
return self.tts._outputs['wav']
def process_folder(folder, tts: TTS_Clone):
logging.info(f'TTS processing folder {folder}...')
with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:
transcript = json.load(f)
full_wav = []
if not os.path.exists(os.path.join(folder, 'temp')):
os.makedirs(os.path.join(folder, 'temp'))
previous_end = 0
for i, line in enumerate(transcript):
text = line['text']
start = line['start']
end = line['end']
wav = tts.inference(text, os.path.join(folder, 'temp', f'zh_{i}.wav'))
| wav_adjusted = adjust_audio_length(wav, os.path.join(folder, 'temp', f'zh_{i}.wav'), os.path.join( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dtiesling/flask-muck
# Path: tests/app.py
class GuardianModel(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String, nullable=False, unique=True)
age = db.Column(db.Integer, nullable=True)
family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))
family = db.relationship(FamilyModel)
children: Mapped[list["ChildModel"]] = db.relationship()
# Path: tests/app.py
class ToyApiView(BaseApiView):
api_name = "toy"
Model = ToyModel
ResponseSchema = ToySchema
CreateSchema = ToySchema
PatchSchema = ToySchema
UpdateSchema = ToySchema
parent = ChildApiView
one_to_one_api = True
# Path: tests/app.py
class ChildModel(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String, nullable=False)
age = db.Column(db.Integer, nullable=True)
family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))
guardian_id = db.Column(db.Integer, db.ForeignKey(GuardianModel.id))
guardian = db.relationship(GuardianModel, back_populates="children")
toy: Mapped["ToyModel"] = db.relationship(uselist=False)
# Path: tests/app.py
class ToyModel(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String, nullable=False)
family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))
child_id = db.Column(db.Integer, db.ForeignKey(ChildModel.id))
child = db.relationship(ChildModel, back_populates="toy")
# Path: tests/app.py
class BaseApiView(FlaskMuckApiView):
"""Base view to inherit from. Helpful for setting class variables shared with all API views such as "sqlalchemy_db"
and "decorators".
"""
session = db.session
decorators = [login_required]
pre_create_callbacks = [PreCallback]
pre_update_callbacks = [PreCallback]
pre_patch_callbacks = [PreCallback]
pre_delete_callbacks = [PreCallback]
post_create_callbacks = [PostCallback]
post_update_callbacks = [PostCallback]
post_patch_callbacks = [PostCallback]
post_delete_callbacks = [PostCallback]
# Path: tests/app.py
class PreCallback(FlaskMuckCallback):
def execute(self) -> None:
return
# Path: tests/app.py
class PostCallback(FlaskMuckCallback):
def execute(self) -> None:
return
# Path: tests/app.py
class GuardianApiView(BaseApiView):
api_name = "guardians"
Model = GuardianModel
ResponseSchema = GuardianSchema
CreateSchema = GuardianSchema
PatchSchema = GuardianSchema
UpdateSchema = GuardianSchema
DetailSchema = GuardianDetailSchema
searchable_columns = [GuardianModel.name, GuardianModel.age]
# Path: tests/test.py
import json
import pytest
from unittest.mock import patch
from pydantic import BaseModel, ConfigDict
from flask_muck.exceptions import MuckImplementationError
from flask_muck.utils import (
get_url_rule,
get_fk_column,
get_query_filters_from_request_path,
get_join_models_from_parent_views,
)
from tests.app import (
GuardianModel,
ToyApiView,
ChildModel,
ToyModel,
BaseApiView,
PreCallback,
PostCallback,
GuardianApiView,
)
class TestBasicCrud:
def test_create(self, post, user):
response = post("/guardians/", json={"name": "Jill"})
parent = GuardianModel.query.one()
assert response == {"name": parent.name}
# Verify integrity errors are handled.
post("/guardians/", json={"name": "Jill"}, expected_status_code=409)
def test_read(self, get, user, guardian, child):
assert get(f"/guardians/") == [{"name": guardian.name}]
assert get(f"/guardians/{guardian.id}/") == {
"name": "Samantha",
"children": [{"name": "Tamara"}],
}
def test_update(self, put, patch, guardian):
assert put(f"/guardians/{guardian.id}/", json={"name": "updated"}) == {
"name": "updated"
}
assert patch(f"/guardians/{guardian.id}/", json={"name": "patched"}) == {
"name": "patched"
}
def test_delete(self, client, guardian):
client.delete(f"/guardians/{guardian.id}/")
assert GuardianModel.query.count() == 0
class TestAllowedMethods:
def test_get_only(self, client, monkeypatch):
| monkeypatch.setattr(BaseApiView, "allowed_methods", {"GET"}) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: BrianPugh/cyclopts
# Path: cyclopts/_convert.py
def _bool(s: str) -> bool:
def _int(s: str) -> int:
def _bytes(s: str) -> bytes:
def _bytearray(s: str) -> bytearray:
def _convert(type_, element, converter=None):
def get_origin_and_validate(type_: Type):
def resolve(type_: Type) -> Type:
def resolve_optional(type_: Type) -> Type:
def resolve_annotated(type_: Type) -> Type:
def convert(type_: Type, *args: str, converter: Optional[Callable] = None):
def token_count(type_: Union[Type, inspect.Parameter]) -> Tuple[int, bool]:
def to_tuple_converter(value: Union[None, Any, Iterable[Any]]) -> Tuple[Any, ...]:
def to_list_converter(value: Union[None, Any, Iterable[Any]]) -> List[Any]:
def optional_to_tuple_converter(value: Union[None, Any, Iterable[Any]]) -> Optional[Tuple[Any, ...]]:
# Path: cyclopts/group.py
class Group:
name: str = ""
help: str = ""
# All below parameters are keyword-only
_show: Optional[bool] = field(default=None, alias="show", kw_only=True)
_sort_key: Any = field(
default=None,
alias="sort_key",
converter=lambda x: NO_USER_SORT_KEY if x is None else x,
)
converter: Optional[Callable] = field(default=None, kw_only=True)
validator: Tuple[Callable, ...] = field(
default=None,
converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),
kw_only=True,
)
default_parameter: Optional["Parameter"] = field(
default=None,
validator=_group_default_parameter_must_be_none,
kw_only=True,
)
def __str__(self):
return self.name
@property
def show(self):
return bool(self.name) if self._show is None else self._show
@show.setter
def show(self, value):
self._show = value
@property
def sort_key(self):
return None if self._sort_key is NO_USER_SORT_KEY else self._sort_key
@sort_key.setter
def sort_key(self, value):
self._sort_key = value
@classmethod
def create_default_arguments(cls):
return cls("Arguments")
@classmethod
def create_default_parameters(cls):
return cls("Parameters")
@classmethod
def create_default_commands(cls):
return cls("Commands")
@classmethod
def create_ordered(cls, *args, sort_key=None, **kwargs):
"""Create a group with a globally incremented :attr:`~Group.sort_key`.
Used to create a group that will be displayed **after** a previously declared :meth:`Group.create_ordered` group on the help-page.
If a :attr:`~Group.sort_key` is provided, it is **prepended** to the globally incremented counter value (i.e. has priority during sorting).
"""
count = next(_sort_key_counter)
if sort_key is None:
sort_key = (NO_USER_SORT_KEY, count)
elif is_iterable(sort_key):
sort_key = (tuple(sort_key), count)
else:
sort_key = (sort_key, count)
return cls(*args, sort_key=sort_key, **kwargs)
# Path: cyclopts/utils.py
def record_init(target: str):
"""Class decorator that records init argument names as a tuple to ``target``."""
def decorator(cls):
original_init = cls.__init__
signature = inspect.signature(original_init)
@functools.wraps(original_init)
def new_init(self, *args, **kwargs):
bound = signature.bind(self, *args, **kwargs)
original_init(self, *args, **kwargs)
# Circumvent frozen protection.
object.__setattr__(self, target, tuple(k for k, v in bound.arguments.items() if v is not self))
cls.__init__ = new_init
return cls
return decorator
# Path: cyclopts/parameter.py
import inspect
import attrs
from typing import Any, Callable, Optional, Tuple, Type, Union, cast, get_args, get_origin
from attrs import field, frozen
from cyclopts._convert import (
AnnotatedType,
convert,
get_origin_and_validate,
optional_to_tuple_converter,
resolve,
resolve_optional,
to_tuple_converter,
)
from cyclopts.group import Group
from cyclopts.utils import record_init
def _double_hyphen_validator(instance, attribute, values):
if not values:
return
for value in values:
if value is not None and not value.startswith("--"):
raise ValueError(f'{attribute.alias} value must start with "--".')
def _negative_converter(default: Tuple[str, ...]):
def converter(value) -> Tuple[str, ...]:
if value is None:
return default
else:
return to_tuple_converter(value)
return converter
@record_init("_provided_args")
@frozen
class Parameter:
"""Cyclopts configuration for individual function parameters."""
# All documentation has been moved to ``docs/api.rst`` for greater control with attrs.
name: Tuple[str, ...] = field(
default=None,
converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),
)
| converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RoboFlamingo/RoboFlamingo
# Path: open_flamingo/open_flamingo/src/helpers.py
class GatedCrossAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
ff_mult=4,
only_attend_immediate_media=True,
):
super().__init__()
self.attn = MaskedCrossAttention(
dim=dim,
dim_visual=dim_visual,
dim_head=dim_head,
heads=heads,
only_attend_immediate_media=only_attend_immediate_media,
)
self.attn_gate = nn.Parameter(torch.tensor([0.0]))
self.ff = FeedForward(dim, mult=ff_mult)
self.ff_gate = nn.Parameter(torch.tensor([0.0]))
def forward(
self,
x,
media,
media_locations=None,
use_cached_media=False,
):
x = (
self.attn(
x,
media,
media_locations=media_locations,
use_cached_media=use_cached_media,
)
* self.attn_gate.tanh()
+ x
)
x = self.ff(x) * self.ff_gate.tanh() + x
return x
# Path: open_flamingo/open_flamingo/src/utils.py
def getattr_recursive(obj, att):
"""
Return nested attribute of obj
Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
"""
if att == "":
return obj
i = att.find(".")
if i < 0:
return getattr(obj, att)
else:
return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])
# Path: open_flamingo/open_flamingo/src/utils.py
def setattr_recursive(obj, att, val):
"""
Set nested attribute of obj
Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
"""
if "." in att:
obj = getattr_recursive(obj, ".".join(att.split(".")[:-1]))
setattr(obj, att.split(".")[-1], val)
# Path: open_flamingo/open_flamingo/src/flamingo_lm.py
import torch.nn as nn
import copy
from .helpers import GatedCrossAttentionBlock
from .utils import getattr_recursive, setattr_recursive
class FlamingoLayer(nn.Module):
"""
FlamingoLayer is a wrapper around the GatedCrossAttentionBlock and DecoderLayer.
"""
def __init__(
self, gated_cross_attn_layer, decoder_layer, gradient_checkpointing=False, residual=False
):
super().__init__()
self.gated_cross_attn_layer = gated_cross_attn_layer
self.decoder_layer = decoder_layer
self.vis_x = None
self.media_locations = None
self.residual = residual
if self.gated_cross_attn_layer is not None:
self.gated_cross_attn_layer._use_gradient_checkpointing = (
gradient_checkpointing
)
self.decoder_layer._use_gradient_checkpointing = gradient_checkpointing
def clone_parameters(self):
self.res_layer = copy.deepcopy(self.gated_cross_attn_layer)
if self.res_layer is not None:
self.res_layer.requires_grad_(False)
def is_conditioned(self) -> bool:
"""Check whether the layer is conditioned."""
return self.vis_x is not None and self.media_locations is not None
# Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
def condition_vis_x(self, vis_x):
self.vis_x = vis_x
def condition_media_locations(self, media_locations):
self.media_locations = media_locations
def condition_use_cached_media(self, use_cached_media):
self.use_cached_media = use_cached_media
def forward(
self,
lang_x,
attention_mask=None,
**decoder_layer_kwargs,
):
# Cross attention
if self.gated_cross_attn_layer is not None:
if self.vis_x is None:
raise ValueError("vis_x must be conditioned before forward pass")
if self.media_locations is None:
raise ValueError(
"media_locations must be conditioned before forward pass"
)
lang_x = self.gated_cross_attn_layer(
lang_x,
self.vis_x,
media_locations=self.media_locations,
use_cached_media=self.use_cached_media,
)
# Residual
if self.residual and self.res_layer is not None:
lang_x_res = self.res_layer(
lang_x,
self.vis_x,
media_locations=self.media_locations,
attend_previous=self.attend_previous,
)
lang_x = (lang_x + lang_x_res) / 2.0
# Normal decoder layer
lang_x = self.decoder_layer(
lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
)
return lang_x
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self):
| return getattr_recursive(self, self.decoder_layers_attr_name) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: XinyuanLiao/ComplexNN
# Path: complexNN/functional.py
def complexRelu(inp):
return torch.complex(relu(inp.real), relu(inp.imag))
# Path: complexNN/functional.py
def complexGelu(inp):
return torch.complex(gelu(inp.real), gelu(inp.imag))
# Path: complexNN/functional.py
def complexTanh(inp):
return torch.complex(tanh(inp.real), tanh(inp.imag))
# Path: complexNN/functional.py
def complexSigmoid(inp):
return torch.complex(sigmoid(inp.real), sigmoid(inp.imag))
# Path: complexNN/functional.py
def complexMaxPool2d(inp, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):
"""
copy from https://github.com/wavefrontshaping/complexPyTorch
"""
absolute_value, indices = max_pool2d(inp.abs(), kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode, return_indices=True)
absolute_value = absolute_value.type(torch.complex64)
angle = torch.atan2(inp.imag, inp.real)
angle = _retrieve_elements_from_indices(angle, indices)
return absolute_value * (
torch.cos(angle).type(torch.complex64)
+ 1j * torch.sin(angle).type(torch.complex64)
)
# Path: complexNN/functional.py
def complexAvgPool2d(inp, *args, **kwargs):
"""
copy from https://github.com/wavefrontshaping/complexPyTorch
"""
absolute_value_real = avg_pool2d(inp.real, *args, **kwargs)
absolute_value_imag = avg_pool2d(inp.imag, *args, **kwargs)
return absolute_value_real.type(torch.complex64) + 1j * absolute_value_imag.type(
torch.complex64
)
# Path: complexNN/functional.py
def complexAvgPool1d(inp, *args, **kwargs):
absolute_value_real = avg_pool1d(inp.real, *args, **kwargs)
absolute_value_imag = avg_pool1d(inp.imag, *args, **kwargs)
return absolute_value_real.type(torch.complex64) + 1j * absolute_value_imag.type(
torch.complex64
)
# Path: complexNN/functional.py
def complexDropout(inp, p=0.5, training=True):
"""
copy from https://github.com/wavefrontshaping/complexPyTorch
"""
mask = torch.ones(*inp.shape, dtype=torch.float32, device=inp.device)
mask = dropout(mask, p, training) * 1 / (1 - p)
mask.type(inp.dtype)
return mask * inp
# Path: complexNN/functional.py
def complexDropout2d(inp, p=0.5, training=True):
"""
copy from https://github.com/wavefrontshaping/complexPyTorch
"""
mask = torch.ones(*inp.shape, dtype=torch.float32, device=inp.device)
mask = dropout2d(mask, p, training) * 1 / (1 - p)
mask.type(inp.dtype)
return mask * inp
# Path: complexNN/functional.py
def complexElu(inp):
return torch.complex(elu(inp.real), elu(inp.imag))
# Path: complexNN/functional.py
def complexLeakyRelu(inp):
return torch.complex(leaky_relu(inp.real), leaky_relu(inp.imag))
# Path: complexNN/functional.py
def complexSoftmax(inp):
return torch.complex(softmax(inp.real), softmax(inp.imag))
# Path: complexNN/nn.py
import numpy as np
import torch
import torch.nn as nn
from complexNN.functional import complexRelu, complexGelu, complexTanh, complexSigmoid, complexMaxPool2d, \
complexAvgPool2d, complexAvgPool1d, complexDropout, complexDropout2d, complexElu, complexLeakyRelu, complexSoftmax
class cRelu(nn.Module):
@staticmethod
def forward(inp):
return complexRelu(inp)
class cElu(nn.Module):
@staticmethod
def forward(inp):
return complexElu(inp)
class cLeakyRelu(nn.Module):
@staticmethod
def forward(inp):
return complexLeakyRelu(inp)
class cSoftmax(nn.Module):
@staticmethod
def forward(inp):
return complexSoftmax(inp)
class cGelu(nn.Module):
@staticmethod
def forward(inp):
return complexGelu(inp)
class cTanh(nn.Module):
@staticmethod
def forward(inp):
return complexTanh(inp)
class cSigmoid(nn.Module):
@staticmethod
def forward(inp):
| return complexSigmoid(inp) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sanmusen214/BAAH
# Path: modules/configs/defaultSettings.py
# Path: modules/configs/settingMaps.py
def configname2screenshotname(configfilename):
"""
根据config文件名,返回截图文件名
config文件名包含后缀不包含路径
"""
screenshotfilehash = hashlib.sha1(configfilename.encode('utf-8')).hexdigest()
# 如果长度大于8,截取前8位
if len(screenshotfilehash) > 8:
screenshotfilehash = screenshotfilehash[:8]
# 如果长度小于8,补0
elif len(screenshotfilehash) < 8:
screenshotfilehash = screenshotfilehash.zfill(8)
return screenshotfilehash + ".png"
# Path: modules/configs/MyConfig.py
import json
import logging
import os
import time
from modules.configs.defaultSettings import defaultUserDict, defaultSoftwareDict
from modules.configs.settingMaps import configname2screenshotname
# 程序入口应当先import这个类,然后调用parse_user_config方法解析该config实例
# 然后程序入口再import其他模块,在其他模块中import这个类,就可以直接使用这个类的实例了
class MyConfigger:
"""
维护config字典,包含软件config,用户任务config,语言包
"""
NOWVERSION="1.2.0"
USER_CONFIG_FOLDER="./BAAH_CONFIGS"
SOFTWARE_CONFIG_FOLDER="./DATA/CONFIGS"
LANGUAGE_PACKAGE_FOLDER="./DATA/i18n"
SOFTWARE_CONFIG_NAME="software_config.json"
# 读取config这个py里面的配置
def __init__(self):
self.current_dir = os.getcwd()
# 软件的config
self.softwareconfigdict = {}
# 软件的语言包
self.languagepackagedict = {}
# 一次区服任务的config
self.userconfigdict = {}
# 一次区服任务运行的session
self.sessiondict = {}
# 读取软件的config
self.parse_software_config(self.SOFTWARE_CONFIG_NAME)
def parse_user_config(self, file_name):
"""
读取config文件并解析
同时会清空sessiondict
"""
file_path = os.path.join(self.current_dir, self.USER_CONFIG_FOLDER, file_name)
# 字典新值
self.userconfigdict = self._read_config_file(file_path)
# 清空sessiondict
self.sessiondict = {}
# 检查缺失的配置
self._check_user_config()
# 强制设置截图文件名为配置名
self.userconfigdict["SCREENSHOT_NAME"] = configname2screenshotname(file_name)
# 检查截图文件夹路径里是否有DATA, 如果没有DATA,说明是1.1.x版本的配置,需要转换
if "DATA" not in self.userconfigdict["PIC_PATH"]:
| fromkey = defaultUserDict["PIC_PATH"]["m"]["from"] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lucidrains/gateloop-transformer
# Path: gateloop_transformer/gateloop_transformer.py
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
# Path: gateloop_transformer/associative_scan.py
def associative_scan(
operator: Callable,
elems: Tuple[Tensor, Tensor]
):
num_elems = int(elems[0].shape[1])
if not all(int(elem.shape[1]) == num_elems for elem in elems[1:]):
raise ValueError('Array inputs to associative_scan must have the same '
'first dimension. (saw: {})'
.format([elem.shape for elem in elems]))
def _scan(elems):
"""Perform scan on `elems`."""
num_elems = elems[0].shape[1]
if num_elems < 2:
return elems
# Combine adjacent pairs of elements.
reduced_elems = operator(
[elem[:, :-1:2] for elem in elems],
[elem[:, 1::2] for elem in elems])
# Recursively compute scan for partially reduced tensors.
odd_elems = _scan(reduced_elems)
if num_elems % 2 == 0:
even_elems = operator(
[e[:, :-1] for e in odd_elems],
[e[:, 2::2] for e in elems])
else:
even_elems = operator(
odd_elems,
[e[:, 2::2] for e in elems])
# The first element of a scan is the same as the first element
# of the original `elems`.
even_elems = [
torch.cat([elem[:, :1], result], dim=1)
for (elem, result) in zip(elems, even_elems)]
return list(map(_interleave, even_elems, odd_elems))
return _scan(elems)
# Path: gateloop_transformer/simplified_gate_loop.py
from functools import partial
from torch import nn, Tensor
from torch.nn import Module
from typing import Tuple
from einops import rearrange, pack, unpack
from einops.layers.torch import Rearrange
from gateloop_transformer.gateloop_transformer import RMSNorm
from gateloop_transformer.associative_scan import associative_scan
from jax import jit, numpy as jnp
from jax.lax import associative_scan
from jax2torch import jax2torch
import torch
# plain pytorch non-fused associative scan
def exists(v):
return v is not None
def abs_clamp_eps(t, eps = 1e-20):
sign = torch.sign(t)
return sign * t.abs().clamp(min = eps)
# associative scan using heinsen sequences
# https://github.com/glassroom/heinsen_sequence
# graciously shared to the world by Franz A. Heinsen in https://arxiv.org/abs/2311.06281 in October 2023
def heinsen_associative_scan(a, kv, eps = 1e-20):
log_a = a.clamp(min = eps).log()
log_kv = abs_clamp_eps(kv, eps = eps).to(dtype = torch.complex64).log()
a_star = torch.cumsum(log_a, dim = 1)
log_x0_plus_b_star = torch.logcumsumexp(log_kv - a_star, dim = 1)
log_x = a_star + log_x0_plus_b_star
return a_star.exp().real, log_x.exp().real
# naive associative scan with some torchscript of binary operator
@torch.jit.script
def binary_operator(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor]
):
a_i, kv_i = a
a_j, kv_j = b
return a_j * a_i, torch.addcmul(kv_j, a_j, kv_i)
# gate loop operator
def gate_loop_operator(q, kv, a, cache = None, heinsen = False):
if exists(cache):
cache_a, cache_kv = cache
a, a_ps = pack([cache_a, a], 'b * d')
kv, kv_ps = pack([cache_kv, kv], 'b * d')
if heinsen:
a, kv = heinsen_associative_scan(a, kv)
else:
| a, kv = associative_scan(binary_operator, (a, kv)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: QingruZhang/PASTA
# Path: evaluation/utils/env_utils.py
ENV_DATA_DIR = "CM_DATA_DIR"
ENV_MODELS_DIR = "CM_MODELS_DIR"
ENV_RESULTS_DIR = "CM_RESULTS_DIR"
DEFAULT_DATA_DIR = "data"
DEFAULT_MODELS_DIR = "models"
DEFAULT_RESULTS_DIR = "results"
def maybe_relative_to_repo(path: PathLike) -> pathlib.Path:
def read_path(name: str, default: PathLike) -> pathlib.Path:
def determine_data_dir(default: PathLike = DEFAULT_DATA_DIR) -> pathlib.Path:
def determine_models_dir(default: PathLike = DEFAULT_MODELS_DIR) -> pathlib.Path:
def determine_results_dir(default: PathLike = DEFAULT_RESULTS_DIR) -> pathlib.Path:
# Path: evaluation/utils/lang_utils.py
def _cmudict() -> dict[str, list]:
def determine_article(word: str, default: str = "a") -> str:
# Path: evaluation/utils/typing.py
# Path: evaluation/data.py
import argparse
import csv
import json
import logging
import pickle
import random
import datasets
import numpy
import scipy.sparse
import spacy
import wget
from collections import defaultdict
from functools import cache
from itertools import chain
from pathlib import Path
from typing import Any, Sequence, TypedDict, cast
from evaluation.utils import env_utils, lang_utils
from evaluation.utils.typing import Dataset, PathLike, StrSequence
from sklearn.feature_extraction.text import TfidfVectorizer
from tqdm.auto import tqdm
"""Datasets for evaluating context mediation in LMs."""
logger = logging.getLogger(__name__)
SUPPORTED_DATASETS = ("counterfact", "winoventi", "biosbias", "mcrae")
ROME_BASE_URL = "https://rome.baulab.info/data/dsets"
COUNTERFACT_URL = f"{ROME_BASE_URL}/counterfact.json"
ATTRIBUTE_SNIPPETS_URL = f"{ROME_BASE_URL}/attribute_snippets.json"
TFIDF_IDF_URL = f"{ROME_BASE_URL}/idf.npy"
TFIDF_VOCAB_URL = f"{ROME_BASE_URL}/tfidf_vocab.json"
WINOVENTI_URL = "https://raw.githubusercontent.com/commonsense-exception/commonsense-exception/main/data/winoventi_bert_large_final.tsv"
_MCRAE_BLACKLISTED_FEATURE_PREFIXES = ("bought/sold", "eg -", "killed", "king of")
_MCRAE_SPLITTABLE_FEATURE_PREFIXES = (
"associated with",
"an",
"a",
"becomes a",
"causes",
"comes from",
"comes in",
"comes on",
"different",
"found at",
"found below",
"found by",
"found in",
"found on",
"found over",
"found near",
"has an",
"has a",
"has",
"is an",
"is attached to",
"is a",
"is",
"like a",
"made by",
"made of",
"made with",
"made from",
"owned by",
"part of a",
"part of",
"requires a",
"requires",
"used as",
"used at",
"used by",
"used for",
"used in",
"used on",
"used with",
"uses",
)
_BIOS_BIAS_BLACKLISTED_NAMES = frozenset(
{
"Non-Residential",
}
)
# These prefixes do not make as much sense when put in front of the first name, so
# we'll try to remove them as much as possible.
_BIOS_BIAS_PREFIXES = (
"professor",
"prof.",
"prof",
"dr.",
"dr",
"doctor",
"mr.",
"mr",
"ms.",
"ms",
"mrs.",
"mrs",
"rev.",
"rev",
"pastor",
)
_COUNTERFACT_PARAPHRASE_PROMPT_ARTIFACTS = (" (b. ", "(tr. ", "(min. ")
class ContextMediationSample(TypedDict):
"""Single sample that can be used for context mediation analysis."""
id: str # Identifier
entity: str # "Barack Obama"
attribute: str # "invented the iPhone"
context: str # "Everyone knows that Barack Obama invented the iPhone."
prompt: str # "Barack Obama received a degree in"
target_mediated: str | None # "computer science" or not set for generation
target_unmediated: str | None # "law" or not set for generation
source: dict | None # Where this sample was derived from, e.g. counterfact sample.
class ContextMediationBatch(TypedDict):
"""Batch of context mediation samples."""
| id: StrSequence |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Ljzd-PRO/KToolBox
# Path: ktoolbox/configuration.py
class APIConfiguration(BaseModel):
class DownloaderConfiguration(BaseModel):
class PostStructureConfiguration(BaseModel):
class JobConfiguration(BaseModel):
class LoggerConfiguration(BaseModel):
class Configuration(BaseSettings):
# Path: ktoolbox/enum.py
class RetCodeEnum(IntEnum):
"""Enum for ``BaseRet.code``"""
Success = 0
GeneralFailure = -1
# APIRet
NetWorkError = 1001
JsonDecodeError = 1002
ValidationError = 1003
# ActionRet
MissingParameter = 2001
# DownloaderRet
FileExisted = 3001
# Path: ktoolbox/utils.py
class BaseRet(BaseModel, Generic[_T]):
"""Base data model of function return value"""
code: int = RetCodeEnum.Success.value
message: str = ''
exception: Optional[Exception] = None
data: Optional[_T] = None
model_config = ConfigDict(arbitrary_types_allowed=True)
def __bool__(self):
return self.code == RetCodeEnum.Success
# Path: ktoolbox/utils.py
def generate_msg(title: str = None, **kwargs):
"""
Generate message for ``BaseRet`` and logger
:param title: Message title
:param kwargs: Extra data
"""
title: str = title or ""
return f"{title} - {kwargs}" if kwargs else title
# Path: ktoolbox/api/base.py
from abc import ABC, abstractmethod
from typing import Literal, Generic, TypeVar, Optional, Callable
from urllib.parse import urlunparse
from loguru import logger
from pydantic import BaseModel, ValidationError, RootModel
from tenacity import RetryCallState, wait_fixed, retry_if_result
from tenacity.stop import stop_base, stop_never, stop_after_attempt
from ktoolbox.configuration import config
from ktoolbox.enum import RetCodeEnum
from ktoolbox.utils import BaseRet, generate_msg
import httpx
import tenacity
__all__ = ["APITenacityStop", "APIRet", "BaseAPI"]
_T = TypeVar('_T')
class APITenacityStop(stop_base):
"""APIs Stop strategies"""
def __call__(self, retry_state: RetryCallState) -> bool:
if config.api.retry_times is None:
return stop_never(retry_state)
else:
return stop_after_attempt(config.api.retry_times)(retry_state)
def _retry_error_callback(state: RetryCallState) -> "APIRet":
"""
Call after all reties failed
:return Keep the origin return value
"""
# noinspection SpellCheckingInspection
logger.error(
generate_msg(
f"Kemono API call failed",
ret=state.outcome.result(),
)
)
return state.outcome.result()
def _retry(*args, **kwargs):
"""Wrap an API method with a new ``Retrying`` object"""
wrapper = tenacity.retry(
stop=APITenacityStop(),
wait=wait_fixed(config.api.retry_interval),
retry=retry_if_result(lambda x: not bool(x)),
reraise=True,
retry_error_callback=_retry_error_callback,
**kwargs
)
if len(args) == 1 and callable(args[0]):
return wrapper(args[0])
else:
return wrapper
| class APIRet(BaseRet[_T]): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jpjacobpadilla/Google-Colab-Selenium
# Path: google_colab_selenium/colab_selenium_manager.py
class ColabSeleniumManager:
default_colab_options = [
'--headless',
'--no-sandbox',
'--disable-dev-shm-usage',
'--lang=en'
]
_downloaded_chrome = False
_updated_apt = False
update_apt = ['sudo', 'apt', 'update']
upgrade_apt = ['sudo', 'apt', 'upgrade']
download_command = ['curl', '-o', 'google-chrome-stable_current_amd64.deb', 'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb']
install_command = ['sudo', 'apt', 'install', './google-chrome-stable_current_amd64.deb', '-y']
clean_up_command = ['rm', 'google-chrome-stable_current_amd64.deb']
chromedriver_path: str = None
def __init__(self, base_options: Options):
if not self._updated_apt:
self.update_upgrade_apt()
if not self._downloaded_chrome:
self.install_chrome()
self.options = self.default_options(base_options or Options())
self.service = self.get_service()
@classmethod
def update_upgrade_apt(cls) -> None:
try:
with Spinner('Updating and upgrading APT', done='Updated and upgraded APT'):
subprocess.run(cls.update_apt, check=True)
subprocess.run(cls.upgrade_apt, check=True)
except Exception as e:
raise GoogleColabSeleniumError('Failed to update and upgrade APT') from e
else:
cls._updated_apt = True
@classmethod
def install_chrome(cls) -> None:
"""
To Install Google-Chrome-Stable, the first command uses CURL to download
the debian file. Next Advanced Package Tool installs the file and once
it's installed, the .deb file, which is no longer needed, is deleted.
"""
try:
with Spinner('Downloading Google Chrome', done='Downloaded Google Chrome'):
subprocess.run(cls.download_command, check=True)
subprocess.run(cls.install_command, check=True)
subprocess.run(cls.clean_up_command, check=True)
except Exception as e:
raise InstallChromeError("Failed to install Google Chrome.") from e
else:
cls._downloaded_chrome = True
@classmethod
def default_options(cls, options: Options) -> Options:
for default in cls.default_colab_options:
options.add_argument(default)
return options
@classmethod
def get_service(cls) -> Service:
path = cls.chromedriver_path or cls.prepare_driver()
return Service(path)
@classmethod
def prepare_driver(cls) -> str:
try:
path = SeleniumManager().driver_location(Options())
cls.chromedriver_path = path
return path
except Exception as e:
raise ChromeDriverPathError("Failed to find ChromeDriver.") from e
# Path: google_colab_selenium/spinner.py
class Spinner:
def __init__(self, message: str, done: str):
self.message = message
self.done_message = done
self.stop_event = threading.Event()
def __enter__(self):
self.show_spinner(self.message)
return self
def __exit__(self, *args, **kwargs):
self.remove_spinner()
def show_spinner(self, text):
self.spinner_id = uuid.uuid4()
spinner_html = f"""
<div class="spinner-container">
<div class="spinner" id="{self.spinner_id}-circle"></div>
<div class="spinner-text" id="{self.spinner_id}-text">{text}</div>
</div>
<style>
@keyframes spin {{
from {{ transform: rotate(0deg); }}
to {{ transform: rotate(360deg); }}
}}
.spinner-container {{
display: flex;
align-items: center;
margin-bottom: 3px;
}}
.spinner {{
border: 3px solid rgba(0, 0, 0, 0.1);
border-left-color: lightblue;
border-radius: 50%;
width: 12px;
height: 12px;
animation: spin 1s linear infinite;
}}
.spinner-text {{
padding-left: 6px;
}}
</style>
"""
display(HTML(spinner_html))
def remove_spinner(self):
js_code = f"""
const element = document.getElementById("{self.spinner_id}-circle");
element.style.border = "3px solid limegreen";
element.style.animation = "none";
const text = document.getElementById("{self.spinner_id}-text");
text.innerText = "{self.done_message}";
"""
display(Javascript(js_code))
# Path: google_colab_selenium/exceptions.py
class StartingChromeDriverError(GoogleColabSeleniumError):
"""Exception raised when ChromeDriver fails to start."""
pass
# Path: google_colab_selenium/chromedriver.py
from google_colab_selenium.colab_selenium_manager import ColabSeleniumManager
from google_colab_selenium.spinner import Spinner
from google_colab_selenium.exceptions import StartingChromeDriverError
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
class ChromeDriver(webdriver.Chrome):
"""
A thin wrapper around the Selenium Chrome Webdriver which makes it easy
to use in Google Colab Notebooks.
The ColabSeleniumManager class installs Google-Chrome-Stable and adds the
nessasary headers to use in a Colab Notebook.
The headers that are automatically added are:
--headless
--no-sandbox
--disable-dev-shm-usage
--lang=en
"""
def __init__(self, options: Options = None, keep_alive: bool = True):
self.manager = ColabSeleniumManager(options)
try:
| with Spinner('Initializing Chromedriver', done='Initialized Chromedriver'): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: microsoft/monitors4codegen
# Path: tests/test_utils.py
@contextlib.contextmanager
def create_test_context(params: dict) -> Iterator[MultilspyContext]:
"""
Creates a test context for the given parameters.
"""
config = MultilspyConfig.from_dict(params)
logger = MultilspyLogger()
user_home_dir = os.path.expanduser("~")
multilspy_home_directory = str(pathlib.Path(user_home_dir, ".multilspy"))
temp_extract_directory = str(pathlib.Path(multilspy_home_directory, uuid4().hex))
try:
os.makedirs(temp_extract_directory, exist_ok=False)
assert params['repo_url'].endswith('/')
repo_zip_url = params['repo_url'] + f"archive/{params['repo_commit']}.zip"
FileUtils.download_and_extract_archive(logger, repo_zip_url, temp_extract_directory, "zip")
dir_contents = os.listdir(temp_extract_directory)
assert len(dir_contents) == 1
source_directory_path = str(pathlib.Path(temp_extract_directory, dir_contents[0]))
yield MultilspyContext(config, logger, source_directory_path)
finally:
if os.path.exists(temp_extract_directory):
shutil.rmtree(temp_extract_directory)
# Path: tests/test_utils.py
def is_cuda_available() -> bool:
"""
Returns True if CUDA is available, False otherwise
"""
if torch.cuda.is_available():
try:
t = torch.rand(1).cuda()
t = t * 2
return True
except RuntimeError:
return False
return False
# Path: tests/monitor_guided_decoding/test_numargs_monitor_java.py
import torch
import transformers
import pytest
from pathlib import PurePath
from monitors4codegen.multilspy.language_server import SyncLanguageServer
from monitors4codegen.multilspy.multilspy_config import Language
from tests.test_utils import create_test_context, is_cuda_available
from transformers import AutoTokenizer, AutoModelForCausalLM
from monitors4codegen.multilspy.multilspy_utils import TextUtils
from monitors4codegen.monitor_guided_decoding.monitors.numargs_monitor import NumMethodArgumentsMonitor
from monitors4codegen.monitor_guided_decoding.monitor import MonitorFileBuffer
from monitors4codegen.monitor_guided_decoding.hf_gen import MGDLogitsProcessor
from transformers.generation.utils import LogitsProcessorList
from monitors4codegen.multilspy.multilspy_types import Position
from monitors4codegen.monitor_guided_decoding.tokenizer_wrapper import HFTokenizerWrapper
"""
This file contains tests for Monitor-Guided Decoding for correct number of arguments in Java
"""
pytest_plugins = ("pytest_asyncio",)
@pytest.mark.asyncio
async def test_multilspy_java_clickhouse_highlevel_sinker_modified_numargs():
"""
Test the working of numargs_monitor with Java repository - clickhouse-highlevel-sinker modified
"""
code_language = Language.JAVA
params = {
"code_language": code_language,
"repo_url": "https://github.com/LakshyAAAgrawal/clickhouse-highlevel-sinker/",
"repo_commit": "5775fd7a67e7b60998e1614cf44a8a1fc3190ab0"
}
device = torch.device('cuda' if is_cuda_available() else 'cpu')
model: transformers.modeling_utils.PreTrainedModel = AutoModelForCausalLM.from_pretrained(
"bigcode/santacoder", trust_remote_code=True
).to(device)
tokenizer = AutoTokenizer.from_pretrained("bigcode/santacoder")
| with create_test_context(params) as context: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bigai-nlco/langsuite
# Path: langsuite/envs/teach/libs/teach/dataset/initialization.py
class Initialization:
def __init__(
self, time_start, agents=None, objects=None, custom_object_metadata=None
):
self.time_start = time_start
self.agents = agents if agents is not None else []
self.objects = objects if objects is not None else []
self.custom_object_metadata = (
custom_object_metadata if custom_object_metadata is not None else {}
)
def add_agent(self, agent):
self.agents.append(agent)
def add_object(self, obj):
self.objects.append(obj)
def reset_time(self, time_desired=0):
# Note: We could Unix time or any desired time instead of 0
self.time_start = time_desired
def to_dict(self):
_dict = OrderedDict()
_dict["time_start"] = self.time_start
if len(self.agents) > 0:
_dict["agents"] = [
x if type(x) is dict else x.to_dict() for x in self.agents
]
if len(self.objects) > 0:
_dict["objects"] = [
x if type(x) is dict else x.to_dict() for x in self.objects
]
if self.custom_object_metadata is not None:
_dict["custom_object_metadata"] = self.custom_object_metadata
return _dict
@classmethod
def from_dict(cls, initialization_dict) -> "Initialization":
agents = []
objects = []
if "agents" in initialization_dict:
agents = [Pose_With_ID.from_dict(x) for x in initialization_dict["agents"]]
if "objects" in initialization_dict:
objects = [
Pose_With_ID.from_dict(x) for x in initialization_dict["objects"]
]
return cls(
time_start=initialization_dict["time_start"], agents=agents, objects=objects
)
# Path: langsuite/envs/teach/libs/teach/dataset/interaction.py
class Interaction:
def __init__(self, agent_id, action, is_object=False, status=None, time_start=None):
self.agent_id = agent_id
self.action = action
self.is_object = is_object
self.status = status
self.time_start = time_start
def to_dict(self):
_dict = OrderedDict()
if self.is_object:
_dict["object_id"] = self.agent_id
else:
_dict["agent_id"] = self.agent_id
_dict.update(self.action.to_dict())
if self.status is not None:
_dict["status"] = self.status
return _dict
@classmethod
def from_dict(cls, interaction_dict, action_type) -> "Interaction":
if "object_id" in interaction_dict:
is_object = True
agent_id = interaction_dict["object_id"]
else:
is_object = False
agent_id = interaction_dict["agent_id"]
if action_type == "Motion":
action = Action_Motion.from_dict(interaction_dict)
elif action_type == "MapGoal":
action = Action_MapGoal.from_dict(interaction_dict)
elif action_type == "ObjectInteraction":
action = Action_ObjectInteraction.from_dict(interaction_dict)
elif action_type == "ProgressCheck":
action = Action_ProgressCheck.from_dict(interaction_dict)
elif action_type == "Keyboard":
action = Action_Keyboard.from_dict(interaction_dict)
elif action_type == "Audio":
action = Action_Audio.from_dict(interaction_dict)
else:
action = Action_Basic.from_dict(interaction_dict)
status = interaction_dict.get("status")
time_start = interaction_dict.get("time_start")
return cls(
agent_id=agent_id,
action=action,
is_object=is_object,
status=status,
time_start=time_start,
)
# Path: langsuite/envs/teach/libs/teach/dataset/episode.py
from collections import OrderedDict
from langsuite.envs.teach.libs.teach.dataset.initialization import Initialization
from langsuite.envs.teach.libs.teach.dataset.interaction import Interaction
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from __future__ import annotations
class Episode:
def __init__(
self,
episode_id,
world,
world_type,
commander_embodied,
initial_state=None,
interactions=None,
):
self.episode_id = episode_id
self.world = world
self.world_type = world_type
self.commander_embodied = commander_embodied
self.initial_state = initial_state
self.interactions = interactions if interactions is not None else []
self.final_state = None
def reset_initial_state(self, initialization):
self.initialization = initialization
def add_interaction(self, interaction):
self.interactions.append(interaction)
def remove_interaction(self):
if len(self.interactions) > 0:
del self.interactions[-1]
def to_dict(self):
_dict = OrderedDict()
_dict["episode_id"] = self.episode_id
_dict["world"] = self.world
_dict["world_type"] = self.world_type
_dict["commander_embodied"] = str(self.commander_embodied)
if self.initial_state is not None:
_dict["initial_state"] = self.initial_state.to_dict()
_dict["interactions"] = [x.to_dict() for x in self.interactions]
if self.final_state is not None:
_dict["final_state"] = self.final_state.to_dict()
return _dict
@classmethod
def from_dict(cls, episode_dict, definitions, process_init_state=True) -> "Episode":
interactions = []
for interaction_dict in episode_dict.get("interactions"):
action_type = definitions.map_actions_id2info[
interaction_dict["action_id"]
]["action_type"]
interaction = Interaction.from_dict(interaction_dict, action_type)
interactions.append(interaction)
return cls(
episode_dict["episode_id"],
episode_dict["world"],
episode_dict["world_type"],
episode_dict["commander_embodied"],
| initial_state=Initialization.from_dict(episode_dict["initial_state"]) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tmlr-group/DeepInception
# Path: config.py
FALCON_PATH = f"{ROOT_PATH}/falcon-7b-instruct"
# Path: config.py
LLAMA_PATH = f"{ROOT_PATH}/Llama-2-7b-hf"
# Path: config.py
TARGET_TEMP = 0
# Path: config.py
TARGET_TOP_P = 1
# Path: config.py
VICUNA_PATH = f"{ROOT_PATH}/vicuna-7b-v1.5"
# Path: language_models.py
class GPT(LanguageModel):
API_RETRY_SLEEP = 10
API_ERROR_OUTPUT = "$ERROR$"
API_QUERY_SLEEP = 2
API_MAX_RETRY = 5
API_TIMEOUT = 20
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate(self, conv: List[Dict],
max_n_tokens: int,
temperature: float,
top_p: float):
'''
Args:
conv: List of dictionaries, OpenAI API format
max_n_tokens: int, max number of tokens to generate
temperature: float, temperature for sampling
top_p: float, top p for sampling
Returns:
str: generated response
'''
output = self.API_ERROR_OUTPUT
for _ in range(self.API_MAX_RETRY):
try:
response = openai.ChatCompletion.create(
model = self.model_name,
messages = conv,
max_tokens = max_n_tokens,
temperature = temperature,
)
output = response["choices"][0]["message"]["content"]
break
except openai.error.OpenAIError as e:
print(type(e), e)
time.sleep(self.API_RETRY_SLEEP)
time.sleep(self.API_QUERY_SLEEP)
return output
def batched_generate(self,
convs_list: List[List[Dict]],
max_n_tokens: int,
temperature: float,
top_p: float = 1.0,):
return [self.generate(conv, max_n_tokens, temperature, top_p) for conv in convs_list]
# Path: language_models.py
class HuggingFace(LanguageModel):
def __init__(self,model_name, model, tokenizer):
self.model_name = model_name
self.model = model
self.tokenizer = tokenizer
self.eos_token_ids = [self.tokenizer.eos_token_id]
def batched_generate(self,
full_prompts_list,
max_n_tokens: int,
temperature: float,
top_p: float = 1.0,):
inputs = self.tokenizer(full_prompts_list, return_tensors='pt', padding=True)
inputs = {k: v.to(self.model.device.index) for k, v in inputs.items()}
# Batch generation
if temperature > 0:
output_ids = self.model.generate(
**inputs,
max_new_tokens=max_n_tokens,
do_sample=True,
temperature=temperature,
eos_token_id=self.eos_token_ids,
top_p=top_p,
)
else:
output_ids = self.model.generate(
**inputs,
max_new_tokens=max_n_tokens,
do_sample=False,
eos_token_id=self.eos_token_ids,
top_p=1,
temperature=1, # To prevent warning messages
)
# If the model is not an encoder-decoder type, slice off the input tokens
if not self.model.config.is_encoder_decoder:
output_ids = output_ids[:, inputs["input_ids"].shape[1]:]
# Batch decoding
outputs_list = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)
for key in inputs:
inputs[key].to('cpu')
output_ids.to('cpu')
del inputs, output_ids
gc.collect()
torch.cuda.empty_cache()
return outputs_list
def extend_eos_tokens(self):
# Add closing braces for Vicuna/Llama eos when using attacker model
self.eos_token_ids.extend([
self.tokenizer.encode("}")[1],
29913,
9092,
16675])
# Path: conversers.py
import torch
import common
from transformers import AutoModelForCausalLM, AutoTokenizer
from config import (FALCON_PATH, LLAMA_PATH, TARGET_TEMP, TARGET_TOP_P,
VICUNA_PATH)
from language_models import GPT, HuggingFace
def load_attack_and_target_models(args):
targetLM = TargetLM(model_name = args.target_model,
max_n_tokens = args.target_max_n_tokens,
| temperature = TARGET_TEMP, # init to 0 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: radekd91/inferno
# Path: inferno/datasets/ImageDatasetHelpers.py
def bbox2point(left, right, top, bottom, type='bbox'):
''' bbox from detector and landmarks are different
'''
if type == 'kpt68':
old_size = (right - left + bottom - top) / 2 * 1.1
center_x = right - (right - left) / 2.0
center_y = bottom - (bottom - top) / 2.0
# center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
elif type == 'bbox':
old_size = (right - left + bottom - top) / 2
center_x = right - (right - left) / 2.0
center_y = bottom - (bottom - top) / 2.0 + old_size * 0.12
# center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])
elif type == "mediapipe":
old_size = (right - left + bottom - top) / 2 * 1.1
center_x = right - (right - left) / 2.0
center_y = bottom - (bottom - top) / 2.0
# center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
else:
raise NotImplementedError(f" bbox2point not implemented for {type} ")
if isinstance(center_x, np.ndarray):
center = np.stack([center_x, center_y], axis=1)
else:
center = np.array([center_x, center_y])
return old_size, center
# Path: inferno/datasets/ImageDatasetHelpers.py
def bbpoint_warp(image, center, size, target_size_height, target_size_width=None, output_shape=None, inv=True, landmarks=None,
order=3 # order of interpolation, bicubic by default
):
target_size_width = target_size_width or target_size_height
tform = point2transform(center, size, target_size_height, target_size_width)
tf = tform.inverse if inv else tform
output_shape = output_shape or (target_size_height, target_size_width)
dst_image = warp(image, tf, output_shape=output_shape, order=order)
if landmarks is None:
return dst_image
# points need the matrix
if isinstance(landmarks, np.ndarray):
assert isinstance(landmarks, np.ndarray)
tf_lmk = tform if inv else tform.inverse
dst_landmarks = tf_lmk(landmarks[:, :2])
elif isinstance(landmarks, list):
tf_lmk = tform if inv else tform.inverse
dst_landmarks = []
for i in range(len(landmarks)):
dst_landmarks += [tf_lmk(landmarks[i][:, :2])]
elif isinstance(landmarks, dict):
tf_lmk = tform if inv else tform.inverse
dst_landmarks = {}
for key, value in landmarks.items():
dst_landmarks[key] = tf_lmk(landmarks[key][:, :2])
else:
raise ValueError("landmarks must be np.ndarray, list or dict")
return dst_image, dst_landmarks
# Path: inferno/datasets/FaceAlignmentTools.py
import numpy as np
import skvideo
import types
from pathlib import Path
from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp
def align_face(image, landmarks, landmark_type, scale_adjustment, target_size_height, target_size_width=None,):
"""
Returns an image with the face aligned to the center of the image.
:param image: The full resolution image in which to align the face.
:param landmarks: The landmarks of the face in the image (in the original image coordinates).
:param landmark_type: The type of landmarks. Such as 'kpt68' or 'bbox' or 'mediapipe'.
:param scale_adjustment: The scale adjustment to apply to the image.
:param target_size_height: The height of the output image.
:param target_size_width: The width of the output image. If not provided, it is assumed to be the same as target_size_height.
:return: The aligned face image. The image will be in range [0,1].
"""
# landmarks_for_alignment = "mediapipe"
left = landmarks[:,0].min()
top = landmarks[:,1].min()
right = landmarks[:,0].max()
bottom = landmarks[:,1].max()
old_size, center = bbox2point(left, right, top, bottom, type=landmark_type)
size = (old_size * scale_adjustment).astype(np.int32)
| img_warped, lmk_warped = bbpoint_warp(image, center, size, target_size_height, target_size_width, landmarks=landmarks) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hxz393/ConfigCenterComparer
# Path: config/settings.py
SQL_CONFIG_NACOS = """
SELECT
data_id,
group_id,
content,
gmt_modified
FROM
config_info
"""
# Path: config/settings.py
SQL_CONFIG_APOLLO_ID = """
SELECT
n.AppId,
n.NamespaceName,
i.`Key`,
i.`Value`,
i.DataChange_LastTime
FROM
Item i
INNER JOIN Namespace n ON i.NamespaceId = n.Id
WHERE
i.IsDeleted = 0
AND i.`Key` != '';
"""
# Path: config/settings.py
SQL_CONFIG_APOLLO_NAME = """
SELECT
App.Name,
n.NamespaceName,
i.`Key`,
i.`Value`,
i.DataChange_LastTime
FROM
Item i
INNER JOIN Namespace n ON i.NamespaceId = n.Id
INNER JOIN App ON n.AppId = App.AppId
WHERE
i.IsDeleted = 0
AND i.`Key` != '';
"""
# Path: config/settings.py
APOLLO_NAME_LIST = ['AppId', 'Name', ]
# Path: module/get_query_sql.py
import logging
from typing import Dict, Optional
from config.settings import SQL_CONFIG_NACOS, SQL_CONFIG_APOLLO_ID, SQL_CONFIG_APOLLO_NAME, APOLLO_NAME_LIST
"""
此模块用于处理配置中心相关的查询,包括从不同的配置中心获取 SQL 查询语句。
本模块提供了 `get_query_sql` 函数,用于根据配置中心类型和 Apollo 应用名称获取对应的查询 SQL。支持从 Nacos 和 Apollo 配置中心获取数据。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
def get_query_sql(config_main: Dict[str, str]) -> Optional[str]:
"""
根据配置中心类型和 Apollo 应用名称获取查询 SQL。
此函数接收一个字典,包含配置中心类型和 Apollo 应用名称。它根据配置中心类型(Nacos 或 Apollo)以及 Apollo 应用名称('AppId' 或 'Name'),返回相应的 SQL 查询语句。
:param config_main: 包含配置中心类型和 Apollo 应用名称的字典。
:type config_main: Dict[str, str]
:return: 对应的查询 SQL 语句。如果无法匹配到合适的配置中心或应用名称,则返回 None。
:rtype: Optional[str]
:example:
>>> get_query_sql({"config_center": "Nacos"})
SQL_CONFIG_NACOS
>>> get_query_sql({"config_center": "Apollo", "apollo_name": "AppId"})
SQL_CONFIG_APOLLO_ID
>>> get_query_sql({"config_center": "Apollo", "apollo_name": "Name"})
SQL_CONFIG_APOLLO_NAME
"""
try:
config_center = config_main.get('config_center')
apollo_name = config_main.get('apollo_name')
if config_center == 'Nacos':
return SQL_CONFIG_NACOS
| elif config_center == 'Apollo' and apollo_name in APOLLO_NAME_LIST: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pytorch-labs/ao
# Path: torchao/quantization/quant_primitives.py
def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):
# assumes symmetric quantization
# assumes axis == 0
# assumes dense memory format
# TODO(future): relax ^ as needed
# default setup for affine quantization of activations
eps = torch.finfo(torch.float32).eps
# get min and max
min_val, max_val = torch.aminmax(x, dim=1)
# calculate scale and zero point based on min and max
# reference: https://fburl.com/code/srbiybme
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
device = min_val_neg.device
# reference: https://fburl.com/code/4wll53rk
max_val_pos = torch.max(-min_val_neg, max_val_pos)
scale = max_val_pos / (float(quant_max - quant_min) / 2)
# ensure scale is the same dtype as the original tensor
scale = torch.clamp(scale, min=eps).to(x.dtype)
zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
# quantize based on qmin/qmax/scale/zp
# reference: torch/ao/quantization/fx/_decomposed.py?lines=63
x_div = x.transpose(0, 1) / scale
x_round = torch.round(x_div)
x_zp = x_round + zero_point
x_zp = x_zp.transpose(0, 1)
quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)
return quant, scale, zero_point
# Path: torchao/quantization/quant_primitives.py
def quant_int8_dynamic_per_token_linear(
x,
w_vals_int8_t,
w_scales,
bias,
out_dtype,
):
# like F.linear, but with int8 dynamic quantization of activation,
# and a quantized weight
x_vals_int8, x_scales = quantize_activation_per_token_absmax(x)
mm_out = quant_int8_per_token_matmul(
x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype
)
if bias is not None:
mm_out += bias
return mm_out
# Path: torchao/quantization/smoothquant.py
import torch
import torch.nn.functional as F
import torchao.quantization.quant_api as quant_api
from .quant_primitives import (
dynamically_quantize_per_channel,
quant_int8_dynamic_per_token_linear,
)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing out accuracy-only implementation of SmoothQuant
(https://arxiv.org/pdf/2211.10438.pdf)
Note: this is an application of input-weight equalization, with the addition that the
multiplication by scale is fused into the preceding layer, specifically for relevant
parts of transformer blocks.
"""
__all__ = [
"get_scale",
"SmoothFakeDynQuantMixin",
"SmoothFakeDynamicallyQuantizedLinear",
"swap_linear_with_smooth_fq_linear",
"smooth_fq_linear_to_inference",
"set_smooth_fq_attribute",
]
def get_scale(X_absmax, W_absmax, alpha=0.5):
"""
Calculate the scale based on abs(max(X)), abs(max(W)) and alpha
If X is of dimension `b*n*k` and W is dimension `k*m`, the returned
scale is of dimension `k`.
Note: X_absmax is calculated outside of this function because we
need to keep a running version of it during calibration. W_absmax
is calculated outside of this function for consistency with X_absmax.
"""
X_pow = torch.pow(X_absmax, alpha)
W_pow = torch.pow(W_absmax, 1.0 - alpha)
div = X_pow / W_pow
return div.reshape(-1)
class SmoothFakeDynQuantMixin(torch.nn.Module):
def init_smoothquant_variables(self, alpha):
self.calibrating = True
self.x_running_abs_max = None
self.register_buffer("smooth_scale", None)
self.alpha = alpha
# debug only
self.debug_skip_scaling = False
# self.debug_skip_scaling = True
# Currently torch._int_mm cuBLAS underlying kernel does not work with
# non-contiguous weight. However, torch.compil'ing through
# torch._int_mm leads to triton code which is ~2x faster if the weight
# is transposed. So, for now we have a debug flag to toggle whether
# we store the quantized weight transposed, so that we can get correct
# numerics both in eager mode and after torch.compile.
# The default is True for cuBLAS / eager mode, set to False for
# torch.compile.
# self.store_w_int_repr_t = True
self.store_w_int_repr_t = False
def update_x_running_abs_max(self, X):
# update the running max of incoming activations
all_dims_except_last = tuple(range(len(X.shape) - 1))
cur_abs_max = torch.amax(torch.abs(X), dim=all_dims_except_last)
if self.x_running_abs_max is None:
self.x_running_abs_max = cur_abs_max
else:
self.x_running_abs_max = torch.max(cur_abs_max, self.x_running_abs_max)
def get_scaled_quantized_w(self):
# inference
assert (
self.smooth_scale is not None
), "self.smooth_scale is None, did you turn on inference?"
W = self.weight
# scale weight
# in the future, this can be done ahead of time instead of
# during inference
if not self.debug_skip_scaling:
# TODO(future): do below in `to_inference` instead of here
W = torch.matmul(
torch.diag(self.smooth_scale), W.transpose(0, 1)
).transpose(0, 1)
# fake quantize input and weight, and then do matmul in fp32/fp16
# in the future, this should be replaced with quantized kernels which
# work on NVIDIA GPUs (such as protoquant's implementation)
W_dq_dtype = W.dtype
| W_int_repr, W_scales, W_zps = dynamically_quantize_per_channel( |